def __enter__(self):
    # type: () -> ProgramDataFetcher
    """Start the configured datasource program and remember its handle.

    The command line is run through a shell (shell=True). stdin is a pipe
    only when self._stdin is set, otherwise it reads from /dev/null.
    """
    if config.monitoring_core == "cmc":
        # Warning:
        # The preexec_fn parameter is not safe to use in the presence of threads in your
        # application. The child process could deadlock before exec is called. If you
        # must use it, keep it trivial! Minimize the number of libraries you call into.
        #
        # Note:
        # If you need to modify the environment for the child use the env parameter
        # rather than doing it in a preexec_fn. The start_new_session parameter can take
        # the place of a previously common use of preexec_fn to call os.setsid() in the
        # child.
        self._process = subprocess.Popen(
            self._cmdline,
            shell=True,
            # NOTE(review): this devnull file object is never closed explicitly,
            # leaking one fd per call; subprocess.DEVNULL would avoid it --
            # confirm the subprocess wrapper in use provides it.
            stdin=subprocess.PIPE if self._stdin else open(os.devnull),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            start_new_session=True,
            close_fds=True,
        )
    else:
        # We can not create a separate process group when running Nagios
        # Upon reaching the service_check_timeout Nagios only kills the process
        # group of the active check.
        self._process = subprocess.Popen(
            self._cmdline,
            shell=True,
            stdin=subprocess.PIPE if self._stdin else open(os.devnull),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
        )
    return self
def _get_agent_info_program(self, commandline, command_stdin):
    # type: (Union[bytes, Text], Optional[bytes]) -> RawAgentData
    """Execute the datasource program and return its raw stdout.

    Raises MKAgentError on non-zero exit and tries to terminate the whole
    process group when an MKTimeout interrupts the call.
    """
    exepath = commandline.split()[0]  # for error message, hide options!

    self._logger.debug("Calling external program %r" % (commandline))
    p = None
    try:
        if config.monitoring_core == "cmc":
            # preexec_fn=os.setsid puts the child into its own session /
            # process group, which the MKTimeout handler below kills as a whole.
            p = subprocess.Popen(  # nosec
                commandline,
                shell=True,
                stdin=subprocess.PIPE if command_stdin else open(os.devnull),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                preexec_fn=os.setsid,
                close_fds=True,
            )
        else:
            # We can not create a separate process group when running Nagios
            # Upon reaching the service_check_timeout Nagios only kills the process
            # group of the active check.
            p = subprocess.Popen(  # nosec
                commandline,
                shell=True,
                stdin=subprocess.PIPE if command_stdin else open(os.devnull),
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
            )

        if command_stdin:
            stdout, stderr = p.communicate(input=ensure_bytestr(command_stdin))
        else:
            stdout, stderr = p.communicate()
        exitstatus = p.returncode
    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if p:
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.wait()
        raise
    finally:
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if p:
            if p.stdout is None or p.stderr is None:
                raise Exception("stdout needs to be set")
            p.stdout.close()
            p.stderr.close()

    if exitstatus:
        if exitstatus == 127:
            raise MKAgentError("Program '%s' not found (exit code 127)" % exepath)
        else:
            # NOTE(review): no encoding= is passed to Popen, so stderr is bytes
            # and renders as b'...' on Python 3; _fetch_raw_data wraps the same
            # interpolation with six.ensure_str -- consider aligning.
            raise MKAgentError("Agent exited with code %d: %s" % (exitstatus, stderr))

    return stdout
def _execute_bash_commands(self, commands, debug=False): # type: (List[Command], bool) -> None if not commands: return for command in commands: if debug: self._logger.debug(" ".join(command)) try: p = subprocess.Popen( command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, encoding="utf-8", ) stdout, stderr = p.communicate() if p.returncode != 0: raise MKGeneralException( _("Activate changes error. Unable to prepare site snapshots. Failed command: %r; StdOut: %r; StdErr: %s" ) % (command, stdout, stderr)) except OSError as e: raise MKGeneralException( _("Activate changes error. Unable to prepare site snapshots. Failed command: %r, Exception: %s" ) % (command, e))
def do_notify(event_server, logger, event, username=None, is_cancelling=False):
    # type: (Any, Logger, Any, Optional[str], bool) -> None
    """Forward an Event Console event to Check_MK's notification system.

    Builds the notification context, skips hosts in scheduled downtime and
    pipes the context into "cmk --notify stdin".

    (Type comment fixed: username defaults to None, so it is Optional[str],
    not bool as previously annotated.)
    """
    if _core_has_notifications_disabled(event, logger):
        return

    context = _create_notification_context(event_server, event, username, is_cancelling, logger)

    if logger.isEnabledFor(VERBOSE):
        logger.log(VERBOSE, "Sending notification via Check_MK with the following context:")
        for varname, value in sorted(context.items()):
            logger.log(VERBOSE, " %-25s: %s", varname, value)

    if context["HOSTDOWNTIME"] != "0":
        logger.info("Host %s is currently in scheduled downtime. "
                    "Skipping notification of event %s." % (context["HOSTNAME"], event["id"]))
        return

    # Send notification context via stdin.
    # Newlines in values are escaped so each "VAR=value" stays on one line.
    context_string = "".join(
        ["%s=%s\n" % (varname, value.replace("\n", "\\n")) for (varname, value) in context.items()])

    p = subprocess.Popen(
        ["cmk", "--notify", "stdin"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        close_fds=True,
        encoding="utf-8",
    )
    stdout, _stderr = p.communicate(input=context_string)
    status = p.returncode
    if status:
        logger.error("Error notifying via Check_MK: %s" % stdout.strip())
    else:
        logger.info("Successfully forwarded notification for event %d to Check_MK" % event["id"])
def _send_email(config, to, subject, body, logger):
    # type: (Dict[str, Any], Any, Any, Any, Logger) -> bool
    """Send *body* via the "mail" command with UTF-8 charset.

    Returns True on success, False (with logged output) when the mail
    command exits non-zero.

    Bug fix: subject/to were encoded to bytes, which made
    " ".join(command_utf8) raise TypeError on Python 3 (mixing str and
    bytes) whenever debug_rules was enabled. Popen runs in text mode
    (encoding="utf-8") anyway, so the arguments stay str.
    """
    command_utf8 = ["mail", "-S", "sendcharsets=utf-8", "-s", subject, to]

    if config["debug_rules"]:
        logger.info("  Executing: %s" % " ".join(command_utf8))

    p = subprocess.Popen(
        command_utf8,
        close_fds=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        encoding="utf-8",
    )
    # FIXME: This may lock on too large buffer. We should move all "mail sending" code
    # to a general place and fix this for all our components (notification plugins,
    # notify.py, this one, ...)
    stdout, stderr = p.communicate(input=body)
    exitcode = p.returncode

    logger.info(' Exitcode: %d' % exitcode)
    if exitcode != 0:
        logger.info(" Error: Failed to send the mail.")
        for line in (stdout + stderr).splitlines():
            logger.info(" Output: %s" % line.rstrip())
        return False

    return True
def extract_domain(domain, tar_member):
    """Extract one backup domain sub-archive into the domain's prefix directory.

    Returns a list of error strings (empty on success); any exception is
    converted into an error string instead of propagating.
    """
    try:
        target_dir = domain.get("prefix")
        if not target_dir:
            return []

        # The complete tar.gz file never fits in stringIO buffer..
        tar.extract(tar_member, restore_dir)

        command = ["tar", "xzf", "%s/%s" % (restore_dir, tar_member.name), "-C", target_dir]
        p = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            encoding="utf-8",
        )
        # communicate() already waits; the extra wait() just fetches the
        # (already set) return code.
        _stdout, stderr = p.communicate()
        exit_code = p.wait()
        if exit_code:
            return ["%s - %s" % (domain["title"], stderr)]
    except Exception as e:
        return ["%s - %s" % (domain["title"], str(e))]
    return []
def walk(self, snmp_config, oid, check_plugin_name=None, table_base_oid=None, context_name=None):
    # type: (SNMPHostConfig, str, Optional[str], Optional[str], Optional[str]) -> SNMPRowInfo
    """Run an snmpwalk-style command for *oid* and return the parsed rows.

    Terminates the spawned snmp process on MKTimeout and raises MKSNMPError
    when the command exits non-zero.
    """
    protospec = self._snmp_proto_spec(snmp_config)

    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"

    portspec = self._snmp_port_spec(snmp_config)
    command = self._snmp_walk_command(snmp_config, context_name)
    command += ["-OQ", "-OU", "-On", "-Ot", "%s%s%s" % (protospec, ipaddress, portspec), oid]

    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

    snmp_process = None
    exitstatus = None
    rowinfo = []  # type: SNMPRowInfo
    try:
        # NOTE(review): the devnull handle passed as stdin is never closed
        # explicitly (small fd leak per call).
        snmp_process = subprocess.Popen(command,
                                        close_fds=True,
                                        stdin=open(os.devnull),
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        encoding="utf-8")
        rowinfo = self._get_rowinfo_from_snmp_process(snmp_process)
    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if snmp_process:
            os.kill(snmp_process.pid, signal.SIGTERM)
            snmp_process.wait()
        raise
    finally:
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if snmp_process:
            exitstatus = snmp_process.wait()
            # stderr was created as a PIPE above, so `error` is bound here
            # before the exitstatus check below uses it.
            if snmp_process.stderr:
                error = snmp_process.stderr.read()
                snmp_process.stderr.close()
            if snmp_process.stdout:
                snmp_process.stdout.close()

    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal +
                        "SNMP error: %s\n" % six.ensure_str(error).strip())
        raise MKSNMPError("SNMP Error on %s: %s (Exit-Code: %d)" %
                          (ipaddress, six.ensure_str(error).strip(), exitstatus))
    return rowinfo
def get(self, snmp_config, oid, context_name=None):
    # type: (SNMPHostConfig, OID, Optional[ContextName]) -> Optional[RawValue]
    """Fetch a single OID via snmpget (or snmpgetnext for a ".*" suffix).

    Returns the stripped value, or None when the command fails, the answer
    is unparsable, or the agent signals "no such object/instance".
    """
    if oid.endswith(".*"):
        oid_prefix = oid[:-2]
        commandtype = "getnext"
    else:
        oid_prefix = oid
        commandtype = "get"

    protospec = self._snmp_proto_spec(snmp_config)

    ipaddress = snmp_config.ipaddress
    if snmp_config.is_ipv6_primary:
        ipaddress = "[" + ipaddress + "]"

    portspec = self._snmp_port_spec(snmp_config)
    command = (
        self._snmp_base_command(commandtype, snmp_config, context_name) +
        ["-On", "-OQ", "-Oe", "-Ot", "%s%s%s" % (protospec, ipaddress, portspec), oid_prefix])

    console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

    snmp_process = subprocess.Popen(
        command,
        close_fds=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    # NOTE(review): wait() is called before the pipes are read; a very large
    # response could fill the pipe buffer and deadlock here. The pipes are
    # also never closed explicitly. A single GET answer is tiny in practice,
    # but consider communicate() instead -- confirm against callers.
    exitstatus = snmp_process.wait()
    if snmp_process.stderr is None or snmp_process.stdout is None:
        raise TypeError()

    if exitstatus:
        console.verbose(tty.red + tty.bold + "ERROR: " + tty.normal + "SNMP error\n")
        console.verbose(snmp_process.stderr.read() + "\n")
        return None

    line = snmp_process.stdout.readline().strip()
    if not line:
        console.verbose("Error in response to snmpget.\n")
        return None

    parts = line.split("=", 1)
    if len(parts) != 2:
        return None
    item = parts[0]
    value = parts[1].strip()
    console.vverbose("SNMP answer: ==> [%s]\n" % value)
    if value.startswith('No more variables') or \
       value.startswith('End of MIB') or \
       value.startswith('No Such Object available') or \
       value.startswith('No Such Instance currently exists'):
        return None

    # In case of .*, check if prefix is the one we are looking for
    if commandtype == "getnext" and not item.startswith(oid_prefix + "."):
        return None

    return strip_snmp_value(value)
def _execute_script(event_columns, body, event, logger):
    # type: (Any, Any, Any, Any) -> None
    """Pipe *body* into /bin/bash with CMK_* environment variables built
    from the event's tags, then log exit code and output.

    Bug fix: keys/values were encoded to bytes before use, which broke on
    Python 3 -- "CMK_" + key.upper() raised TypeError for a bytes key, and
    a text-mode Popen (encoding="utf-8") expects a str environment anyway.
    (Assumes _get_event_tags yields str keys/values -- TODO confirm.)
    """
    script_env = os.environ.copy()

    for key, value in _get_event_tags(event_columns, event).items():
        script_env["CMK_" + key.upper()] = value

    # Traps can contain 0-Bytes. We need to remove this from the script
    # body. Otherwise suprocess.Popen will crash.
    p = subprocess.Popen(
        ['/bin/bash'],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        close_fds=True,
        env=script_env,
        encoding="utf-8",
    )
    stdout, _stderr = p.communicate(input=body)
    logger.info(' Exit code: %d' % p.returncode)
    if stdout:
        logger.info(' Output: \'%s\'' % stdout)
def do_check_nagiosconfig():
    """Validate the Nagios configuration by running "nagios -vp".

    Returns True when the config check succeeds, False otherwise.
    """
    command = [cmk.utils.paths.nagios_binary, "-vp", cmk.utils.paths.nagios_config_file]
    cmk_base.console.verbose("Running '%s'\n" % subprocess.list2cmdline(command))

    cmk_base.console.output("Validating Nagios configuration...")
    p = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        encoding="utf-8",
    )
    stdout, stderr = p.communicate()
    exit_status = p.returncode
    if not exit_status:
        cmk_base.console.output(tty.ok + "\n")
        return True

    cmk_base.console.output("ERROR:\n")
    # NOTE(review): output() is called with two positional arguments here --
    # verify it accepts more than one string; elsewhere it is called with one.
    cmk_base.console.output(stdout, stderr)
    return False
def _ping(address):
    # type: (HostAddress) -> bool
    """Return True if *address* answers two ICMP echo requests within 2s.

    Bug fix: the devnull handle used as stdout was opened but never closed,
    leaking one file descriptor per call; it is now closed via a context
    manager once the child has terminated.
    """
    with open(os.devnull, "a") as devnull:
        return subprocess.Popen(['ping', '-c2', '-w2', address],
                                stdout=devnull,
                                stderr=subprocess.STDOUT,
                                encoding="utf-8",
                                close_fds=True).wait() == 0
def test_cmk_subprocess_input_no_errors(command, input_, encoding):
    """Streams from a Popen with the given encoding must have the right type.

    NOTE: input_ is provided by the test parametrization but intentionally
    not passed to communicate() here.
    """
    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding=encoding,
    )
    streams = proc.communicate()
    _check_type_of_stdout_and_stderr(streams, encoding)
def _get_files_to_check(pylint_test_dir):
    """Collect the Python files pylint should check for the current major
    Python version, excluding paths that are compiled together or skipped.
    """
    p = subprocess.Popen(
        ["%s/scripts/find-python-files" % repo_path(),
         str(sys.version_info[0])],
        stdout=subprocess.PIPE,
        encoding="utf-8",
        shell=False,
        close_fds=True,
    )

    stdout = p.communicate()[0]

    files = []
    for fname in stdout.splitlines():
        # Thin out these excludes some day...
        rel_path = fname[len(repo_path()) + 1:]

        # Can currently not be checked alone. Are compiled together below
        if rel_path.startswith("checks/") or \
           rel_path.startswith("inventory/") or \
           rel_path.startswith("agents/bakery/") or \
           rel_path.startswith("enterprise/agents/bakery/"):
            continue

        # TODO: We should also test them...
        if rel_path == "werk" \
           or rel_path.startswith("tests/") \
           or rel_path.startswith("scripts/") \
           or rel_path.startswith("agents/wnx/integration/"):
            continue

        # TODO: disable random, not that important stuff
        if rel_path.startswith("agents/windows/it/") \
           or rel_path.startswith("agents/windows/msibuild/") \
           or rel_path.startswith("doc/") \
           or rel_path.startswith("livestatus/api/python/example") \
           or rel_path.startswith("livestatus/api/python/make_"):
            continue

        files.append(fname)

    # Add the compiled files for things that are no modules yet
    # NOTE(review): the file handle returned by open() is dropped unclosed;
    # this works as a "touch" but leaks the handle until GC.
    open(pylint_test_dir + "/__init__.py", "w")
    _compile_check_and_inventory_plugins(pylint_test_dir)

    if is_enterprise_repo():
        _compile_bakery_plugins(pylint_test_dir)

    # Not checking compiled check, inventory, bakery plugins with Python 3
    if sys.version_info[0] == 2:
        files += [
            pylint_test_dir,
        ]

    return files
def _git_has_pending_changes():
    """Return True when "git status --porcelain" reports any change in the
    configuration directory; False when git itself is not installed.

    Bug fix: the child process was never wait()ed on (p.stdout was read
    directly), leaving a zombie behind. communicate() both drains stdout
    and reaps the child.
    """
    try:
        p = subprocess.Popen(["git", "status", "--porcelain"],
                             cwd=cmk.utils.paths.default_config_dir,
                             stdout=subprocess.PIPE,
                             encoding="utf-8")
        stdout = p.communicate()[0]
        return stdout != ""
    except OSError as e:
        if e.errno == errno.ENOENT:
            return False  # ignore missing git command
        raise
def test_cmk_subprocess_input_errors(command, input_, encoding, py3_error):
    """communicate() must raise when input type and encoding mismatch.

    Expected Python 3 subprocess failures:
      AttributeError: 'bytes' object has no attribute 'encode'
      TypeError: memoryview: a bytes-like object is required, not 'str'
    """
    proc = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding=encoding,
    )
    with pytest.raises(py3_error):
        proc.communicate(input_)
def check_domain(domain, tar_member):
    # type: (DomainSpec, tarfile.TarInfo) -> List[Text]
    """Verify that the files of a backup domain sub-archive can be restored.

    Extracts the sub-tar into the restore directory, lists its contents and
    checks every target path (or its nearest existing parent) for write
    access. Returns a list of error messages, empty when all is restorable.
    """
    errors = []
    prefix = domain["prefix"]

    def check_exists_or_writable(path_tokens):
        # type: (List[str]) -> bool
        # Walk upwards until an existing path component is found, then check
        # that component for write access.
        if not path_tokens:
            return False
        if os.path.exists("/".join(path_tokens)):
            if os.access("/".join(path_tokens), os.W_OK):
                return True  # exists and writable

            errors.append(_("Permission problem: Path not writable %s") % "/".join(path_tokens))
            return False  # not writable

        return check_exists_or_writable(path_tokens[:-1])

    # The complete tar file never fits in stringIO buffer..
    tar.extract(tar_member, restore_dir)

    # Older versions of python tarfile handle empty subtar archives :(
    # This won't work: subtar = tarfile.open("%s/%s" % (restore_dir, tar_member.name))
    p = subprocess.Popen(
        ["tar", "tzf", "%s/%s" % (restore_dir, tar_member.name)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    stdout, stderr = p.communicate()
    if stderr:
        errors.append(_("Contains corrupt file %s") % tar_member.name)
        return errors

    # Bug fix: stdout is one string; iterating it directly yielded single
    # characters instead of file names, so the writability check ran on
    # nonsense paths. Iterate the listing line by line instead.
    for line in stdout.splitlines():
        full_path = prefix + "/" + line
        path_tokens = full_path.split("/")
        check_exists_or_writable(path_tokens)

    # Cleanup
    os.unlink("%s/%s" % (restore_dir, tar_member.name))

    return errors
def _current_monitoring_core(): # type: () -> Text try: p = subprocess.Popen( ["omd", "config", "show", "CORE"], close_fds=True, stdin=open(os.devnull), stdout=subprocess.PIPE, stderr=open(os.devnull, "w"), encoding="utf-8", ) return p.communicate()[0] except OSError as e: # Allow running unit tests on systems without omd installed (e.g. on travis) if e.errno != errno.ENOENT: raise return "UNKNOWN"
def activate(self):
    """Apply changed global settings to OMD via "omd config change".

    Only settings that are known to the current OMD installation and that
    actually differ from the current values are handed over (one KEY=value
    line per change on stdin). Raises MKGeneralException on failure.
    """
    current_settings = self._load_site_config()

    settings = {}
    settings.update(self._to_omd_config(self.load()))
    settings.update(self._to_omd_config(self.load_site_globals()))

    config_change_commands = []
    self._logger.debug("Set omd config: %r" % settings)

    for key, val in settings.items():
        if key not in current_settings:
            continue  # Skip settings unknown to current OMD

        if current_settings[key] == settings[key]:
            continue  # Skip unchanged settings

        config_change_commands.append("%s=%s" % (key, val))

    if not config_change_commands:
        self._logger.debug("Got no config change commands...")
        return

    self._logger.debug("Executing \"omd config change\"")
    self._logger.debug(" Commands: %r" % config_change_commands)
    p = subprocess.Popen(
        ["omd", "config", "change"],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        stdin=subprocess.PIPE,
        close_fds=True,
        encoding="utf-8",
    )
    stdout, _stderr = p.communicate(input="\n".join(config_change_commands))
    self._logger.debug(" Exit code: %d" % p.returncode)
    self._logger.debug(" Output: %r" % stdout)
    if p.returncode != 0:
        raise MKGeneralException(
            _("Failed to activate changed site "
              "configuration.\nExit code: %d\nConfig: %s\nOutput: %s") %
            (p.returncode, config_change_commands, stdout))
def send_mail_sendmail(m, target, from_address):
    """Hand the already-formatted message *m* over to the local sendmail binary.

    Returns 0 after spooling; raises Exception when the binary is missing
    or exits non-zero.

    Bug fix: m.as_string() returns str, but stdin was a bytes-mode pipe,
    so communicate() raised TypeError on Python 3. The pipe is now opened
    in text mode via encoding="utf-8".
    """
    cmd = [_sendmail_path()]
    if from_address:
        cmd += ['-F', from_address, "-f", from_address]
    # NOTE(review): the target argv element is bytes while the others are
    # str; POSIX Popen encodes each element individually, so this works,
    # but a plain str would be cleaner.
    cmd += ["-i", target.encode("utf-8")]

    try:
        p = subprocess.Popen(cmd, stdin=subprocess.PIPE, encoding="utf-8")
    except OSError:
        raise Exception("Failed to send the mail: /usr/sbin/sendmail is missing")

    p.communicate(input=m.as_string())
    if p.returncode != 0:
        raise Exception("sendmail returned with exit code: %d" % p.returncode)

    sys.stdout.write("Spooled mail to local mail transmission agent\n")
    return 0
def activate(self):
    """Write the rrdcached configuration and restart the daemon via omd.

    Returns an empty list on success, or a single-element list with the
    error description (including traceback) on failure.
    """
    try:
        self._write_config_file()

        p = subprocess.Popen(
            ["omd", "restart", "rrdcached"],
            # NOTE(review): this devnull handle is never closed -> one fd
            # leaked per call; subprocess.DEVNULL would avoid it (confirm
            # the subprocess wrapper in use provides it).
            stdin=open(os.devnull),
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            close_fds=True,
            encoding="utf-8",
        )

        stdout, _stderr = p.communicate()
        if p.returncode != 0:
            raise Exception(stdout)

        return []
    except Exception:
        logger.exception("error restarting rrdcached")
        return ["Failed to activate rrdcached configuration: %s" % (traceback.format_exc())]
def _git_command(args):
    """Run a git command inside the configuration directory.

    Raises MKGeneralException (with git's combined output) when the command
    is missing or exits non-zero.

    Bug fix: the original called p.wait() before reading stdout, which can
    deadlock once git's output fills the pipe buffer; communicate() drains
    the pipe while waiting.
    """
    command = ["git"] + [six.ensure_str(a) for a in args]

    logger.debug("GIT: Execute in %s: %s", cmk.utils.paths.default_config_dir,
                 subprocess.list2cmdline(command))
    try:
        p = subprocess.Popen(command,
                             cwd=cmk.utils.paths.default_config_dir,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT,
                             encoding="utf-8")
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise MKGeneralException(
                _("Error executing GIT command <tt>%s</tt>:<br><br>%s") %
                (subprocess.list2cmdline(command), e))
        raise

    stdout, _stderr = p.communicate()
    if p.returncode != 0:
        out = u"" if stdout is None else six.ensure_text(stdout)
        raise MKGeneralException(
            _("Error executing GIT command <tt>%s</tt>:<br><br>%s") %
            (subprocess.list2cmdline(command), out.replace("\n", "<br>\n")))
def scan_parents_of(config_cache, hosts, silent=False, settings=None):
    # type: (config.ConfigCache, List[HostName], bool, Optional[Dict[str, int]]) -> Gateways
    """Determine the gateway (parent) of each host via parallel traceroutes.

    Per host a tuple (gateway-or-None, state, skipped_gateways, message) is
    returned, where state is one of: root, direct, gateway, failed,
    dnserror, garbled, notfound.
    """
    if settings is None:
        settings = {}

    if config.monitoring_host:
        nagios_ip = ip_lookup.lookup_ipv4_address(config.monitoring_host)
    else:
        nagios_ip = None

    os.putenv("LANG", "")
    os.putenv("LC_ALL", "")

    # Start processes in parallel
    procs = []  # type: List[Tuple[HostName, Optional[HostAddress], Union[str, subprocess.Popen]]]
    for host in hosts:
        console.verbose("%s " % host)
        try:
            ip = ip_lookup.lookup_ipv4_address(host)
            if ip is None:
                raise RuntimeError()
            command = [
                "traceroute", "-w", "%d" % settings.get("timeout", 8),
                "-q", "%d" % settings.get("probes", 2),
                "-m", "%d" % settings.get("max_ttl", 10),
                "-n", ip
            ]
            console.vverbose("Running '%s'\n" % subprocess.list2cmdline(command))

            procs.append((host, ip,
                          subprocess.Popen(command,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           close_fds=True,
                                           encoding="utf-8")))
        except Exception as e:
            if cmk.utils.debug.enabled():
                raise
            procs.append((host, None, "ERROR: %s" % e))

    # Output marks with status of each single scan
    def dot(color, dot='o'):
        # type: (str, str) -> None
        if not silent:
            console.output(tty.bold + color + dot + tty.normal)

    # Now all run and we begin to read the answers. For each host
    # we add a triple to gateways: the gateway, a scan state and a diagnostic output
    gateways = []  # type: Gateways
    for host, ip, proc_or_error in procs:
        if isinstance(proc_or_error, six.string_types):
            lines = [proc_or_error]
            exitstatus = 1
        else:
            exitstatus = proc_or_error.wait()
            if proc_or_error.stdout is None:
                raise RuntimeError()
            lines = [l.strip() for l in proc_or_error.stdout.readlines()]

        if exitstatus:
            dot(tty.red, '*')
            gateways.append(
                (None, "failed", 0, "Traceroute failed with exit code %d" % (exitstatus & 255)))
            continue

        if len(lines) == 1 and lines[0].startswith("ERROR:"):
            message = lines[0][6:].strip()
            console.verbose("%s: %s\n", host, message, stream=sys.stderr)
            dot(tty.red, "D")
            gateways.append((None, "dnserror", 0, message))
            continue

        if len(lines) == 0:
            if cmk.utils.debug.enabled():
                raise MKGeneralException(
                    "Cannot execute %s. Is traceroute installed? Are you root?" % command)
            dot(tty.red, '!')
            continue

        if len(lines) < 2:
            if not silent:
                console.error("%s: %s\n" % (host, ' '.join(lines)))
            gateways.append((None, "garbled", 0,
                             "The output of traceroute seem truncated:\n%s" % ("".join(lines))))
            dot(tty.blue)
            continue

        # Parse output of traceroute:
        # traceroute to 8.8.8.8 (8.8.8.8), 30 hops max, 40 byte packets
        #  1  * * *
        #  2  10.0.0.254  0.417 ms  0.459 ms  0.670 ms
        #  3  172.16.0.254  0.967 ms  1.031 ms  1.544 ms
        #  4  217.0.116.201  23.118 ms  25.153 ms  26.959 ms
        #  5  217.0.76.134  32.103 ms  32.491 ms  32.337 ms
        #  6  217.239.41.106  32.856 ms  35.279 ms  36.170 ms
        #  7  74.125.50.149  45.068 ms  44.991 ms *
        #  8  * 66.249.94.86  41.052 ms 66.249.94.88  40.795 ms
        #  9  209.85.248.59  43.739 ms  41.106 ms 216.239.46.240  43.208 ms
        # 10  216.239.48.53  45.608 ms  47.121 ms 64.233.174.29  43.126 ms
        # 11  209.85.255.245  49.265 ms  40.470 ms  39.870 ms
        # 12  8.8.8.8  28.339 ms  28.566 ms  28.791 ms
        routes = []  # type: List[Optional[str]]
        for line in lines[1:]:
            parts = line.split()
            route = parts[1]
            if route.count('.') == 3:
                routes.append(route)
            elif route == '*':
                routes.append(None)  # No answer from this router
            else:
                if not silent:
                    console.error("%s: invalid output line from traceroute: '%s'\n" % (host, line))

        if len(routes) == 0:
            error = "incomplete output from traceroute. No routes found."
            console.error("%s: %s\n" % (host, error))
            gateways.append((None, "garbled", 0, error))
            dot(tty.red)
            continue

        # Only one entry -> host is directly reachable and gets nagios as parent -
        # if nagios is not the parent itself. Problem here: How can we determine
        # if the host in question is the monitoring host? The user must configure
        # this in monitoring_host.
        if len(routes) == 1:
            if ip == nagios_ip:
                gateways.append((None, "root", 0, ""))  # We are the root-monitoring host
                dot(tty.white, 'N')
            elif config.monitoring_host:
                gateways.append(((config.monitoring_host, nagios_ip, None), "direct", 0, ""))
                dot(tty.cyan, 'L')
            else:
                gateways.append((None, "direct", 0, ""))
            continue

        # Try far most route which is not identical with host itself
        ping_probes = settings.get("ping_probes", 5)
        skipped_gateways = 0
        this_route = None  # type: Optional[HostAddress]
        for r in routes[::-1]:
            if not r or (r == ip):
                continue
            # Do (optional) PING check in order to determine if that
            # gateway can be monitored via the standard host check
            if ping_probes:
                if not gateway_reachable_via_ping(r, ping_probes):
                    console.verbose("(not using %s, not reachable)\n", r, stream=sys.stderr)
                    skipped_gateways += 1
                    continue
            this_route = r
            break

        if not this_route:
            error = "No usable routing information"
            if not silent:
                console.error("%s: %s\n" % (host, error))
            gateways.append((None, "notfound", 0, error))
            dot(tty.blue)
            continue

        # TTLs already have been filtered out)
        gateway_ip = this_route
        gateway = _ip_to_hostname(config_cache, this_route)
        if gateway:
            console.verbose("%s(%s) ", gateway, gateway_ip)
        else:
            console.verbose("%s ", gateway_ip)

        # Try to find DNS name of host via reverse DNS lookup
        dns_name = _ip_to_dnsname(gateway_ip)
        gateways.append(((gateway, gateway_ip, dns_name), "gateway", skipped_gateways, ""))
        dot(tty.green, 'G')
    return gateways
def _do_create_snapshot(data):
    """Create a snapshot tar file containing one tar.gz sub-archive per
    backup domain plus a "checksums" member with hashed/signed digests.

    The snapshot is assembled in a work directory which is removed again
    in any case; the finished file is moved into place atomically.
    """
    snapshot_name = data["snapshot_name"]
    work_dir = snapshot_dir + "/workdir/%s" % snapshot_name

    try:
        if not os.path.exists(work_dir):
            os.makedirs(work_dir)

        # Open / initialize files
        filename_target = "%s/%s" % (snapshot_dir, snapshot_name)
        filename_work = "%s/%s.work" % (work_dir, snapshot_name)

        open(filename_target, "w").close()

        def get_basic_tarinfo(name):
            # Build a plain-file TarInfo owned by root with mode 0644.
            tarinfo = tarfile.TarInfo(name)
            tarinfo.mtime = time.time()
            tarinfo.uid = 0
            tarinfo.gid = 0
            tarinfo.mode = 0o644
            tarinfo.type = tarfile.REGTYPE
            return tarinfo

        # Initialize the snapshot tar file and populate with initial information
        tar_in_progress = tarfile.open(filename_work, "w")

        for key in ["comment", "created_by", "type"]:
            tarinfo = get_basic_tarinfo(key)
            encoded_value = data[key].encode("utf-8")
            tarinfo.size = len(encoded_value)
            tar_in_progress.addfile(tarinfo, io.BytesIO(encoded_value))

        tar_in_progress.close()

        # Process domains (sorted)
        subtar_info = {}

        for name, info in sorted(_get_default_backup_domains().items()):
            prefix = info.get("prefix", "")
            filename_subtar = "%s.tar.gz" % name
            path_subtar = "%s/%s" % (work_dir, filename_subtar)

            paths = ["." if x[1] == "" else x[1] for x in info.get("paths", [])]
            command = [
                "tar", "czf", path_subtar, "--ignore-failed-read", "--force-local", "-C", prefix
            ] + paths

            proc = subprocess.Popen(
                command,
                stdin=None,
                close_fds=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                cwd=prefix,
                encoding="utf-8",
            )
            _stdout, stderr = proc.communicate()
            exit_code = proc.wait()
            # Allow exit codes 0 and 1 (files changed during backup)
            if exit_code not in [0, 1]:
                raise MKGeneralException(
                    "Error while creating backup of %s (Exit Code %d) - %s.\n%s" %
                    (name, exit_code, stderr, command))

            # NOTE(review): on Python 3 sha256() requires bytes, but the
            # subtar is opened in text mode here -- likely needs open(..., "rb").
            subtar_hash = sha256(open(path_subtar).read()).hexdigest()
            # NOTE(review): subtar_hash is str (hexdigest); verify
            # _snapshot_secret() returns str too, otherwise this concatenation
            # and the sha256() call break on Python 3.
            subtar_signed = sha256(subtar_hash + _snapshot_secret()).hexdigest()
            subtar_info[filename_subtar] = (subtar_hash, subtar_signed)

            # Append tar.gz subtar to snapshot
            command = ["tar", "--append", "--file=" + filename_work, filename_subtar]
            proc = subprocess.Popen(
                command,
                cwd=work_dir,
                close_fds=True,
            )
            proc.communicate()
            exit_code = proc.wait()

            if os.path.exists(filename_subtar):
                os.unlink(filename_subtar)

            if exit_code != 0:
                raise MKGeneralException("Error on adding backup domain %s to tarfile" % name)

        # Now add the info file which contains hashes and signed hashes for
        # each of the subtars
        info = ''.join(['%s %s %s\n' % (k, v[0], v[1]) for k, v in subtar_info.items()]) + '\n'

        tar_in_progress = tarfile.open(filename_work, "a")
        tarinfo = get_basic_tarinfo("checksums")
        tarinfo.size = len(info)
        # NOTE(review): io.BytesIO(info) with a str raises TypeError on
        # Python 3 -- info presumably needs to be encoded first (compare the
        # encoded_value handling above).
        tar_in_progress.addfile(tarinfo, io.BytesIO(info))
        tar_in_progress.close()

        shutil.move(filename_work, filename_target)

    finally:
        shutil.rmtree(work_dir)
def notify_mail(user_id, msg):
    """Deliver the user-to-user notification *msg* to *user_id* via mail.

    Looks up sender and recipient in the user database, forces a UTF-8
    locale for the mail command and raises MKInternalError when the user,
    the mail address, a UTF-8 locale or the delivery itself fails.
    Returns True on success.
    """
    users = userdb.load_users(lock=False)
    user = users.get(user_id)

    if not user:
        raise MKInternalError(_('This user does not exist.'))

    if not user.get('email'):
        raise MKInternalError(_('This user has no mail address configured.'))

    recipient_name = user.get('alias')
    if not recipient_name:
        recipient_name = user_id

    sender_name = users[config.user.id].get('alias')
    if not sender_name:
        sender_name = user_id

    # Code mostly taken from notify_via_email() from notify.py module
    subject = _('Check_MK: Notification')
    body = _('''Greetings %s,

%s sent you a notification:

---
%s
---

''') % (recipient_name, sender_name, msg['text'])

    if msg['valid_till']:
        body += _('This notification has been created at %s and is valid till %s.') % (
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg['time'])),
            time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(msg['valid_till'])))

    # FIXME: Maybe use the configured mail command for Check_MK-Notify one day
    # TODO: mail does not accept umlauts: "contains invalid character '\303'" in mail
    # addresses. handle this correctly.
    command = ["mail", "-s", six.ensure_str(subject), six.ensure_str(user['email'])]

    # Make sure that mail(x) is using UTF-8. Otherwise we cannot send notifications
    # with non-ASCII characters. Unfortunately we do not know whether C.UTF-8 is
    # available. If e.g. nail detects a non-Ascii character in the mail body and
    # the specified encoding is not available, it will silently not send the mail!
    # Our resultion in future: use /usr/sbin/sendmail directly.
    # Our resultion in the present: look with locale -a for an existing UTF encoding
    # and use that.
    for encoding in os.popen("locale -a 2>/dev/null"):
        l = encoding.lower()
        if "utf8" in l or "utf-8" in l or "utf.8" in l:
            encoding = encoding.strip()
            os.putenv("LANG", encoding)
            break
    else:
        raise MKInternalError(
            _('No UTF-8 encoding found in your locale -a! Please provide C.UTF-8 encoding.'))

    try:
        p = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            stdin=subprocess.PIPE,
            close_fds=True,
            encoding="utf-8",
        )
    except OSError as e:
        raise MKInternalError(
            _('Mail could not be delivered. '
              'Failed to execute command "%s": %s') % (" ".join(command), e))

    stdout, _stderr = p.communicate(input=body)
    exitcode = p.returncode
    if exitcode != 0:
        raise MKInternalError(
            _('Mail could not be delivered. Exit code of command is %r. '
              'Output is: %s') % (exitcode, stdout))
    else:
        return True
def main(sys_argv=None):
    """Check_MK IPMI sensors agent: query a host via FreeIPMI's
    ipmi-sensors (default) or via ipmitool and print agent sections.

    Returns 0 on success and 1 on usage, credential or command errors.
    """
    if sys_argv is None:
        sys_argv = sys.argv[1:]

    short_options = 'u:p:l:D:k:'
    long_options = [
        'help', 'debug', 'ipmi-command=', 'quiet-cache', 'sdr-cache-recreate',
        'interpret-oem-data', 'output-sensor-state', 'ignore-not-available-sensors',
        'driver-type=', 'output-sensor-thresholds'
    ]

    opt_debug = False
    hostname = None
    username = None
    password = None
    privilege_lvl = None
    ipmi_cmd_type = None

    try:
        opts, args = getopt.getopt(sys_argv, short_options, long_options)
    except getopt.GetoptError as err:
        sys.stderr.write("%s\n" % err)
        return 1

    additional_opts = []
    for o, a in opts:
        if o in ['--help']:
            agent_ipmi_sensors_usage()
            return 1
        if o in ['--debug']:
            opt_debug = True

        # Common options
        elif o in ['--ipmi-command']:
            ipmi_cmd_type = a
        elif o in ['-u']:
            username = a
        elif o in ['-p']:
            password = a
        elif o in ['-l']:
            privilege_lvl = a

        # FreeIPMI options
        elif o in ['-D']:
            additional_opts += ["%s" % o, "%s" % a]
        elif o in ['--driver-type']:
            additional_opts += ["%s=%s" % (o, a)]
        elif o in ['-k']:
            additional_opts += ["%s" % o, "%s" % a]
        elif o in ['--quiet-cache']:
            additional_opts.append(o)
        elif o in ['--sdr-cache-recreate']:
            additional_opts.append(o)
        elif o in ['--interpret-oem-data']:
            additional_opts.append(o)
        elif o in ['--output-sensor-state']:
            additional_opts.append(o)
        elif o in ['--ignore-not-available-sensors']:
            additional_opts.append(o)
        elif o in ['--output-sensor-thresholds']:
            additional_opts.append(o)

    if len(args) == 1:
        hostname = args[0]
    else:
        sys.stderr.write("ERROR: Please specify exactly one host.\n")
        return 1

    if not (username and password and privilege_lvl):
        sys.stderr.write("ERROR: Credentials are missing.\n")
        return 1

    os.environ["PATH"] = "/usr/local/sbin:/usr/sbin:/sbin:" + os.environ["PATH"]

    if ipmi_cmd_type in [None, 'freeipmi']:
        ipmi_cmd = [
            "ipmi-sensors", "-h", hostname, "-u", username, "-p", password, "-l", privilege_lvl
        ] + \
        additional_opts
        # Section suffix -> (extra command arguments, excluded output lines)
        queries = {"_sensors": ([], [])}  # type: Dict[str, Tuple[List[str], List[str]]]

    elif ipmi_cmd_type == 'ipmitool':
        ipmi_cmd = [
            "ipmitool", "-H", hostname, "-U", username, "-P", password, "-L", privilege_lvl
        ]
        # As in check_mk_agent
        queries = {
            "": (["sensor", "list"], ['command failed', 'discrete']),
            "_discrete": (["sdr", "elist", "compact"], [])
        }

    else:
        sys.stderr.write("ERROR: Unknown IPMI command '%s'.\n" % ipmi_cmd_type)
        return 1

    ipmi_cmd_str = subprocess.list2cmdline(ipmi_cmd)
    if opt_debug:
        sys.stderr.write("Executing: '%s'\n" % ipmi_cmd_str)

    errors = []
    for section, (types, excludes) in queries.items():
        sys.stdout.write("<<<ipmi%s:sep(124)>>>\n" % section)
        try:
            try:
                p = subprocess.Popen(
                    ipmi_cmd + types,
                    close_fds=True,
                    stdin=open(os.devnull),
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    encoding="utf-8",
                )
            except OSError as e:
                if e.errno == errno.ENOENT:  # No such file or directory
                    raise Exception("Could not find '%s' command (PATH: %s)" %
                                    (ipmi_cmd_type, os.environ.get("PATH")))
                raise

            stdout, stderr = p.communicate()
            if stderr:
                errors.append(stderr)
            parse_data(stdout.splitlines(), excludes)
        except Exception as e:
            errors.append(str(e))

    if errors:
        msg = "ERROR: '%s'.\n" % ", ".join(errors)
        sys.stderr.write(six.ensure_str(msg))
        return 1
    return 0
def _fetch_raw_data(commandline, command_stdin, logger):
    # type: (Union[bytes, Text], Optional[str], Logger) -> RawAgentData
    """Run the datasource program and return its stdout as raw agent data.

    Raises MKAgentError when the program is not found (exit code 127) or
    exits non-zero; re-raises MKTimeout after SIGTERM-ing the process group
    to avoid leaking child processes.
    """
    exepath = commandline.split()[0]  # for error message, hide options!
    logger.debug("Calling external program %r" % (commandline))

    p = None
    devnull = None
    try:
        if command_stdin:
            stdin_arg = subprocess.PIPE
        else:
            # Fix: open(os.devnull) used to be passed inline to Popen and was
            # never closed, leaking one fd per call. Open it once here and
            # close it in the finally block (works on both py2 and py3,
            # unlike subprocess.DEVNULL which is py3-only).
            devnull = open(os.devnull)
            stdin_arg = devnull

        if config.monitoring_core == "cmc":
            if sys.version_info[0] >= 3:
                # Warning:
                # The preexec_fn parameter is not safe to use in the presence of threads in your
                # application. The child process could deadlock before exec is called. If you
                # must use it, keep it trivial! Minimize the number of libraries you call into.
                #
                # Note:
                # If you need to modify the environment for the child use the env parameter
                # rather than doing it in a preexec_fn. The start_new_session parameter can take
                # the place of a previously common use of preexec_fn to call os.setsid() in the
                # child.
                p = subprocess.Popen(
                    commandline,
                    shell=True,
                    stdin=stdin_arg,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    start_new_session=True,
                    close_fds=True,
                )
            else:
                # Python 2: start_new_session not available
                p = subprocess.Popen(  # pylint: disable=subprocess-popen-preexec-fn
                    commandline,
                    shell=True,
                    stdin=stdin_arg,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    preexec_fn=os.setsid,
                    close_fds=True,
                )
        else:
            # We can not create a separate process group when running Nagios
            # Upon reaching the service_check_timeout Nagios only kills the process
            # group of the active check.
            p = subprocess.Popen(
                commandline,
                shell=True,
                stdin=stdin_arg,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                close_fds=True,
            )

        if command_stdin:
            stdout, stderr = p.communicate(input=ensure_bytestr(command_stdin))
        else:
            stdout, stderr = p.communicate()
        exitstatus = p.returncode
    except MKTimeout:
        # On timeout exception try to stop the process to prevent child process "leakage"
        if p:
            os.killpg(os.getpgid(p.pid), signal.SIGTERM)
            p.wait()
        raise
    finally:
        if devnull:
            devnull.close()
        # The stdout and stderr pipe are not closed correctly on a MKTimeout
        # Normally these pipes getting closed after p.communicate finishes
        # Closing them a second time in a OK scenario won't hurt neither..
        if p:
            if p.stdout is None or p.stderr is None:
                raise Exception("stdout needs to be set")
            p.stdout.close()
            p.stderr.close()

    if exitstatus:
        if exitstatus == 127:
            raise MKAgentError("Program '%s' not found (exit code 127)" %
                               six.ensure_str(exepath))
        raise MKAgentError("Agent exited with code %d: %s" %
                           (exitstatus, six.ensure_str(stderr)))
    return stdout
def check_mk_local_automation(command, args=None, indata="", stdin_data=None, timeout=None):
    # type: (str, Optional[Sequence[Union[str, Text]]], Any, Optional[str], Optional[int]) -> Any
    """Run ``check_mk --automation <command>`` locally and return the parsed result.

    The automation's stdin receives ``stdin_data`` (defaulting to ``repr(indata)``)
    and its stdout is evaluated with ``ast.literal_eval``. For 'restart'/'reload'
    the pre/post activate-changes hooks are invoked. Raises the exception built
    by ``_local_automation_failure`` when the call cannot be started, exits
    non-zero, or produces unparseable output.
    """
    if args is None:
        args = []
    new_args = [six.ensure_str(a) for a in args]

    if stdin_data is None:
        stdin_data = repr(indata)

    if timeout:
        new_args = ["--timeout", "%d" % timeout] + new_args

    cmd = ['check_mk', '--automation', command] + new_args
    if command in ['restart', 'reload']:
        call_hook_pre_activate_changes()

    cmd = [six.ensure_str(a) for a in cmd]
    try:
        # This debug output makes problems when doing bulk inventory, because
        # it garbles the non-HTML response output
        # if config.debug:
        #     html.write("<div class=message>Running <tt>%s</tt></div>\n" % subprocess.list2cmdline(cmd))
        auto_logger.info("RUN: %s" % subprocess.list2cmdline(cmd))
        p = subprocess.Popen(cmd,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=True,
                             encoding="utf-8")
    except Exception as e:
        raise _local_automation_failure(command=command, cmdline=cmd, exc=e)

    assert p.stdin is not None
    assert p.stdout is not None
    assert p.stderr is not None

    auto_logger.info("STDIN: %r" % stdin_data)
    p.stdin.write(stdin_data)
    p.stdin.close()

    outdata = p.stdout.read()
    exitcode = p.wait()
    auto_logger.info("FINISHED: %d" % exitcode)
    auto_logger.debug("OUTPUT: %r" % outdata)

    errdata = p.stderr.read()
    if errdata:
        auto_logger.warning("'%s' returned '%s'" % (" ".join(cmd), errdata))

    if exitcode != 0:
        auto_logger.error("Error running %r (exit code %d)" %
                          (subprocess.list2cmdline(cmd), exitcode))
        raise _local_automation_failure(command=command,
                                        cmdline=cmd,
                                        code=exitcode,
                                        out=outdata,
                                        err=errdata)

    # On successful "restart" command execute the activate changes hook
    if command in ['restart', 'reload']:
        call_hook_activate_changes()

    try:
        return ast.literal_eval(outdata)
    # Fix: literal_eval raises ValueError (not only SyntaxError) for input
    # that parses but is not a literal, e.g. stray non-repr output from the
    # automation; both must map to a local automation failure.
    except (SyntaxError, ValueError) as e:
        raise _local_automation_failure(command=command, cmdline=cmd, out=outdata, exc=e)
def main(sys_argv=None):
    """Entry point of the IBM SVC special agent.

    Connects to an SVC/Storwize cluster via ssh, runs the selected svcinfo
    commands and prints the output as Check_MK agent sections. Returns 1 on
    usage errors; exits with 2 when the ssh connection or the cluster-side
    commands fail.
    """
    if sys_argv is None:
        sys_argv = sys.argv[1:]

    short_options = 'hu:p:t:m:i:k'
    # Fix: the list literal was corrupted ('user='******'debug' is not valid
    # Python); the separator between 'user=' and 'debug' has been restored.
    long_options = [
        'help', 'user=', 'debug', 'timeout=', 'profile', 'modules=',
        'accept-any-hostkey'
    ]

    try:
        opts, args = getopt.getopt(sys_argv, short_options, long_options)
    except getopt.GetoptError as err:
        sys.stderr.write("%s\n" % err)
        return 1

    opt_debug = False
    opt_timeout = 10
    opt_any_hostkey = ""

    g_profile = None
    g_profile_path = "ibmsvc_profile.out"

    host_address = None
    user = None
    mortypes = ['all']

    # Section name and cluster-side command for every supported module.
    command_options = {
        "lshost": {
            "section_header": "ibm_svc_host",
            "active": False,
            "command": "lshost -delim :"
        },
        "lslicense": {
            "section_header": "ibm_svc_license",
            "active": False,
            "command": "lslicense -delim :"
        },
        "lsmdisk": {
            "section_header": "ibm_svc_mdisk",
            "active": False,
            "command": "lsmdisk -delim :"
        },
        "lsmdiskgrp": {
            "section_header": "ibm_svc_mdiskgrp",
            "active": False,
            "command": "lsmdiskgrp -delim :"
        },
        "lsnode": {
            "section_header": "ibm_svc_node",
            "active": False,
            "command": "lsnode -delim :"
        },
        "lsnodestats": {
            "section_header": "ibm_svc_nodestats",
            "active": False,
            "command": "lsnodestats -delim :"
        },
        "lssystem": {
            "section_header": "ibm_svc_system",
            "active": False,
            "command": "lssystem -delim :"
        },
        "lssystemstats": {
            "section_header": "ibm_svc_systemstats",
            "active": False,
            "command": "lssystemstats -delim :"
        },
        "lseventlog": {
            "section_header": "ibm_svc_eventlog",
            "active": False,
            "command": "lseventlog -expired no -fixed no -monitoring no -order severity -message no -delim : -nohdr"
        },
        "lsportfc": {
            "section_header": "ibm_svc_portfc",
            "active": False,
            "command": "lsportfc -delim :"
        },
        "lsenclosure": {
            "section_header": "ibm_svc_enclosure",
            "active": False,
            "command": "lsenclosure -delim :"
        },
        "lsenclosurestats": {
            "section_header": "ibm_svc_enclosurestats",
            "active": False,
            "command": "lsenclosurestats -delim :"
        },
        "lsarray": {
            "section_header": "ibm_svc_array",
            "active": False,
            "command": "lsarray -delim :"
        },
        "lsportsas": {
            "section_header": "ibm_svc_portsas",
            "active": False,
            "command": "lsportsas -delim :"
        },
        "disks": {
            "section_header": "ibm_svc_disks",
            "active": False,
            "command": "svcinfo lsdrive -delim :"
        },
    }

    for o, a in opts:
        if o in ['--debug']:
            opt_debug = True
        elif o in ['--profile']:
            import cProfile
            g_profile = cProfile.Profile()
            g_profile.enable()
        elif o in ['-u', '--user']:
            user = a
        elif o in ['-i', '--modules']:
            mortypes = a.split(',')
        elif o in ['-t', '--timeout']:
            opt_timeout = int(a)
        elif o in ['-k', '--accept-any-hostkey']:
            opt_any_hostkey = "-o StrictHostKeyChecking=no"
        elif o in ['-h', '--help']:
            usage()
            sys.exit(0)

    if len(args) == 1:
        host_address = args[0]
    elif not args:
        sys.stderr.write("ERROR: No host given.\n")
        return 1
    else:
        sys.stderr.write("ERROR: Please specify exactly one host.\n")
        return 1

    if user is None:
        sys.stderr.write("ERROR: No user name given.\n")
        return 1

    for module in command_options:
        # Same semantics as the original index()/except ValueError dance.
        if "all" in mortypes or module in mortypes:
            command_options[module]["active"] = True

    #############################################################################
    # fetch information by ssh
    #############################################################################

    cmd = "ssh -o ConnectTimeout=%s %s %s@%s '" % (
        opt_timeout, opt_any_hostkey, user, host_address)
    for module in command_options:
        if command_options[module]["active"]:
            cmd += r"echo \<\<\<%s:sep\(58\)\>\>\>;" % command_options[module][
                "section_header"]
            cmd += "%s || true;" % command_options[module]["command"]
    cmd += "'"

    if opt_debug:
        sys.stderr.write("executing external command: %s\n" % cmd)

    result = subprocess.Popen(  # nosec
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=None,
        encoding="utf-8",
    )
    stdout, stderr = result.communicate()
    exit_code = result.wait()

    if exit_code not in [0, 1]:
        msg = "Error connecting via ssh: %s\n" % stderr
        # Fix: Popen was opened with encoding="utf-8", so stderr is text;
        # writing msg.encode("utf-8") to the text stream sys.stderr raised
        # TypeError under Python 3.
        sys.stderr.write(msg)
        sys.exit(2)

    lines = stdout.split('\n')
    # CMMVC7016E is the cluster-side error prefix; it may appear on the first
    # or second output line.
    if lines[0].startswith("CMMVC7016E") or (
            len(lines) > 1 and lines[1].startswith("CMMVC7016E")):
        # Fix: same bytes-to-text-stream bug as above.
        sys.stderr.write(stdout)
        sys.exit(2)

    # Quite strange.. Why not simply print stdout?
    for line in lines:
        print(line)

    if g_profile:
        g_profile.dump_stats(g_profile_path)
        show_profile = os.path.join(os.path.dirname(g_profile_path),
                                    'show_profile.py')
        # Fix: use a context manager so the helper script's file handle is
        # closed (and flushed) before chmod.
        with open(show_profile, "w") as f:
            f.write("#!/usr/bin/python\n"
                    "import pstats\n"
                    "stats = pstats.Stats('%s')\n"
                    "stats.sort_stats('time').print_stats()\n" % g_profile_path)
        os.chmod(show_profile, 0o755)
        sys.stderr.write("Profile '%s' written. Please run %s.\n" %
                         (g_profile_path, show_profile))