def run_collecting(self):
    if len(self.process_info) > 0:
        # perf on given process(es)
        for pid, pname in self.process_info.items():
            cmd = self.build_record_cmd(pid, pname)
            # TODO: unified output: "Now perf recording %s(%d)..." % (pname, pid)
            stdout, stderr = util.run_cmd(cmd)
            if stdout:
                fileopt.write_file(
                    path.join(self.outdir, "%s.stdout" % pname), stdout)
            if stderr:
                logging.warning(
                    "Command '%s' returned error: %s" % (cmd, stderr))
            if self.options.archive:
                cmd = self.build_archive_cmd(pid, pname)
                stdout, stderr = util.run_cmd(cmd)
                if stderr:
                    logging.warning(
                        "Command '%s' returned error: %s" % (cmd, stderr))
    else:
        # perf the entire system
        cmd = self.build_record_cmd()
        stdout, stderr = util.run_cmd(cmd)
        if stdout:
            fileopt.write_file(
                path.join(self.outdir, "perf.stdout"), stdout)
        if stderr:
            logging.warning("Command '%s' returned error: %s" % (cmd, stderr))
        if self.options.archive:
            cmd = self.build_archive_cmd()
            stdout, stderr = util.run_cmd(cmd)
            if stderr:
                logging.warning(
                    "Command '%s' returned error: %s" % (cmd, stderr))
def save_trace(self, cwd, outputdir=None):
    stderr = util.run_cmd(["cd", self.tracefs])[1]
    if stderr:
        logging.fatal(
            """ERROR: accessing tracing. Root user? Kernel has FTRACE?
debugfs mounted? (mount -t debugfs debugfs /sys/kernel/debug)""")
        return
    if not outputdir:
        logging.fatal("ERROR: please give a dir to save trace data")
        return

    util.chdir(self.tracefs)

    # setup trace, set opt
    stderr = util.run_cmd(["echo nop > current_tracer"], shell=True)[1]
    if stderr:
        logging.fatal("ERROR: reset current_tracer failed")
        os.chdir(cwd)
        return
    bufsize_kb = self.options.bufsize if self.options.bufsize else 4096
    stderr = util.run_cmd(["echo %s > buffer_size_kb" % bufsize_kb],
                          shell=True)[1]
    if stderr:
        logging.fatal("ERROR: set bufsize_kb failed")
        os.chdir(cwd)
        return

    # begin tracing
    for event in [self.direct_reclaim_begin, self.direct_reclaim_end]:
        _, stderr = util.run_cmd(["echo 1 > %s" % event], shell=True)
        if stderr:
            logging.fatal("ERROR: enable %s tracepoint failed" % event)
            os.chdir(cwd)
            return

    # collect trace
    time = self.options.time if self.options.time else 60
    util.run_cmd_for_a_while(["cat trace_pipe > %s/drtrace" % self.outdir],
                             time, shell=True)

    # end tracing
    for event in [self.direct_reclaim_begin, self.direct_reclaim_end]:
        stderr = util.run_cmd(["echo 0 > %s" % event], shell=True)[1]
        if stderr:
            logging.fatal("ERROR: disable %s tracepoint failed" % event)
            os.chdir(cwd)
            return

    os.chdir(cwd)
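# A minimal sketch of the `util.run_cmd_for_a_while` helper used in save_trace()
# above. This is an assumption about its behavior (start a command, let it stream
# output for a fixed number of seconds, then stop it), not the project's actual
# implementation, which may differ.
import subprocess
import time


def run_cmd_for_a_while(cmd, seconds, shell=False):
    # start the command without waiting for it to exit
    proc = subprocess.Popen(cmd, shell=shell)
    try:
        # let it run (e.g. `cat trace_pipe > ...`) for the requested duration
        time.sleep(seconds)
    finally:
        # stop the command and reap the child process
        proc.terminate()
        proc.wait()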
def get_xterm(self):
    try:
        result = run_cmd("echo $TERM").strip()
        if "xterm" in result.lower():
            self.xterm = True
    except Exception:
        # leave self.xterm unset if the terminal can't be detected
        pass
def compress_tarball(output_base=None, output_name=None):
    # compress output files to tarball
    os.chdir(output_base)
    cmd = [
        "tar", "--remove-files", "-zcf",
        "%s.tar.gz" % output_name, output_name
    ]
    stdout, stderr = util.run_cmd(cmd)
    if stderr:
        logging.info("tar stderr: %s" % stderr)
    if stdout:
        logging.debug("tar output: %s" % stdout)
def save_sysconf(self):
    cmd = ["sysctl", "-a"]
    path_limit_file = "/etc/security/limits.conf"

    # save limits.conf
    self.save_to_dir(srcfile=path_limit_file)

    # save `sysctl -a`
    stdout, stderr = util.run_cmd(cmd)
    if stdout:
        fileopt.write_file(os.path.join(self.outdir, "sysctl.conf"), stdout)
    if stderr:
        logging.warning("Running `sysctl -a` returned error: %s" % stderr)
def exec_importer(self, file=None, chunk_size=2000):
    if not file:
        logging.fatal("No file specified.")
        return (None, "No metric dump file specified to load.")
    base_dir = os.path.join(util.pwd(), "../")
    importer = os.path.join(base_dir, "bin/prom2influx")
    cmd = [
        importer,
        "-db", self.db_name,
        "-host", self.host,
        "-port", "%s" % self.port,
        "-chunk", "%s" % chunk_size,  # chunk size of one write request
        "-file", file
    ]
    logging.debug("Running cmd: %s" % ' '.join(cmd))
    return util.run_cmd(cmd)
def collector(self, args):
    # call `collector` and store data to output dir
    base_dir = os.path.join(util.pwd(), "../")
    collector_exec = os.path.join(base_dir, "bin/collector")
    collector_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "collector"))
    if args.pid:
        logging.debug("Collecting process info only for PID %s" % args.pid)
        collector_exec = [collector_exec, '-proc', '-pid', '%s' % args.pid]
    elif args.port:
        protocol = 'UDP' if args.udp else 'TCP'
        pids = ','.join(
            str(_pid) for _pid in proc_meta.find_process_by_port(
                args.port, protocol))
        logging.debug("Collecting process info for PIDs %s" % pids)
        collector_exec = [collector_exec, '-proc', '-pid', '%s' % pids]
    # else call collector without any argument

    stdout, stderr = util.run_cmd(collector_exec)
    if stderr:
        logging.info("collector output: %s" % str(stderr))
    try:
        self.collector_data = json.loads(stdout)
    except json.JSONDecodeError:
        logging.critical("Error collecting system info:\n%s" % stderr)
        return

    # save various info to separate .json files
    for k, v in self.collector_data.items():
        # This is a dirty hack to omit empty results, until Go fix that upstream,
        # see: https://github.com/golang/go/issues/11939
        if (args.pid or args.port) and k in ['sysinfo', 'ntp']:
            continue
        if not v or len(v) < 1:
            logging.debug("Skipped empty result %s:%s" % (k, v))
            continue
        fileopt.write_file(os.path.join(collector_outdir, "%s.json" % k),
                           json.dumps(v, indent=2))
def collector(self):
    # TODO: warn on non-empty output dir
    # call `collector` and store data to output dir
    base_dir = os.path.join(util.pwd(), "../")
    collector_exec = os.path.join(base_dir, "bin/collector")
    collector_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "collector"))
    stdout, stderr = util.run_cmd(collector_exec)
    if stderr:
        logging.info("collector output: %s" % str(stderr))
    try:
        self.collector_data = json.loads(stdout)
    except json.JSONDecodeError:
        logging.critical("Error collecting system info:\n%s" % stderr)
        return

    # save various info to separate .json files
    for k, v in self.collector_data.items():
        fileopt.write_file(os.path.join(collector_outdir, "%s.json" % k),
                           json.dumps(v, indent=2))
def run_vmtouch(self, args):
    if args.subcmd_runtime != "vmtouch":
        logging.debug("Ignoring collection of vmtouch data.")
        return
    if not args.target:
        return
    base_dir = os.path.join(util.pwd(), "../")
    vmtouch_exec = os.path.join(base_dir, "bin/vmtouch")
    vmtouch_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "vmtouch"))
    if not vmtouch_outdir:
        return
    stdout, stderr = util.run_cmd([vmtouch_exec, "-v", args.target])
    if stderr:
        logging.info("vmtouch output: %s" % str(stderr))
        return
    fileopt.write_file(
        os.path.join(
            vmtouch_outdir, "%s_%d.txt" %
            (args.target.replace("/", "_"), int(time.time() * 1000))),
        str(stdout))
def lsof(pid):
    cmd = ["lsof", "-p", "%s" % pid]
    return util.run_cmd(cmd)
def get_base_info(cls, files, param):
    default_file = files[0]
    diff_file = files[1]
    cmd = "diff {} {} {}".format(diff_file, default_file, param)
    result = util.run_cmd(cmd)
    return result
def du_total(filepath):
    # TODO: support relative path, this requires `collector` to output cwd of process
    cmd = ["du", "-s", str(filepath)]
    return util.run_cmd(cmd)
def du_subfiles(filepath):
    # TODO: support relative path, this requires `collector` to output cwd of process
    filelist = glob(filepath + "/*")
    cmd = ["du", "-s"] + filelist
    return util.run_cmd(cmd)
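# All of the functions above rely on a `util.run_cmd` helper that returns an
# (stdout, stderr) pair. The sketch below is only a guess at such a wrapper,
# assuming plain subprocess semantics; the project's real helper may differ
# (e.g. in how it decodes output or reports errors).
import subprocess


def run_cmd(cmd, shell=False):
    # run the command and capture both output streams
    proc = subprocess.Popen(cmd,
                            shell=shell,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return stdout.decode(errors="replace"), stderr.decode(errors="replace")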