def run_collecting(self):
    """Profile with `perf record`, optionally packing data via `perf archive`.

    When `self.process_info` is non-empty, each (pid, pname) entry is
    profiled separately; otherwise the entire system is profiled once.
    Command stdout is saved to `self.outdir`, stderr is logged as a warning.
    """

    def _perf_once(pid=None, pname=None):
        # One record (+ optional archive) round; pname of None means the
        # whole system is being profiled.
        if pname is None:
            record_cmd = self.build_record_cmd()
            out_name = "perf.stdout"
        else:
            record_cmd = self.build_record_cmd(pid, pname)
            out_name = "%s.stdout" % pname
        stdout, stderr = util.run_cmd(record_cmd)
        if stdout:
            fileopt.write_file(path.join(self.outdir, out_name), stdout)
        if stderr:
            logging.warning("Command '%s' returned error: %s" %
                            (record_cmd, stderr))
        if not self.options.archive:
            return
        archive_cmd = (self.build_archive_cmd() if pname is None
                       else self.build_archive_cmd(pid, pname))
        _, archive_err = util.run_cmd(archive_cmd)
        if archive_err:
            logging.warning("Command '%s' returned error: %s" %
                            (archive_cmd, archive_err))

    if self.process_info:  # perf on given process(es)
        for pid, pname in self.process_info.items():
            # TODO: unified output: "Now perf recording %s(%d)..." % (pname, pid)
            _perf_once(pid, pname)
    else:  # perf the entire system
        _perf_once()
def run_collecting(self):
    """Query Prometheus `query_range` for every metric and save the results.

    Each metric's result matrix is written to `self.outdir` as JSON, or
    zlib-compressed to a `.dat` file when `self.options.compress` is set.
    """
    if self.resolution < 15.0:
        # Finer steps than the typical 15s scrape interval only duplicate
        # samples and inflate the output size.
        logging.warning(
            "Sampling resolution < 15s don't increase accuracy but data size."
        )
    for metric in self.get_label_names():
        url = '%s/query_range?query=%s&start=%s&end=%s&step=%s' % (
            self.url_base, metric, self.start_time, self.end_time,
            self.resolution)
        matrix = json.loads(util.read_url(url)[0])
        if matrix['status'] != 'success':
            logging.info("Error querying for key '%s'." % metric)
            logging.debug("Output is:\n%s" % matrix)
            continue
        result = json.dumps(matrix['data']['result'])
        if self.options.compress:
            metric_filename = '%s_%s_to_%s_%ss.dat' % (
                metric, self.start_time, self.end_time, self.resolution)
            # zlib.compress() requires bytes; json.dumps() returns str, so
            # encode first (the original passed str and raised TypeError).
            fileopt.write_file(
                os.path.join(self.outdir, metric_filename),
                zlib.compress(result.encode('utf-8')))
        else:
            metric_filename = '%s_%s_to_%s_%ss.json' % (
                metric, self.start_time, self.end_time, self.resolution)
            fileopt.write_file(os.path.join(self.outdir, metric_filename),
                               result)
        logging.debug("Saved data for key '%s'." % metric)
def get_datadir_size(self):
    """Check on-disk size of every process's data-dir with `du`.

    Bails out early when not running as root, since `du` needs root
    privilege to descend into the data directories. Results (and any
    errors) are saved per-pid under `self.full_outdir`.
    """
    # du requires root privilege to check data-dir
    if not util.is_root_privilege():
        logging.fatal(
            "It's required to check data-dir size with root privilege.")
        return
    for proc in self.collector_data["proc_stats"]:
        args = util.parse_cmdline(proc["cmd"])
        try:
            data_dir = args["data-dir"]
        except KeyError:
            logging.debug("'data-dir' is not set in cmdline args: %s" % args)
            continue
        # Non-empty dir: per-subfile breakdown; empty dir: just the total.
        if os.listdir(data_dir):
            stdout, stderr = space.du_subfiles(data_dir)
        else:
            stdout, stderr = space.du_total(data_dir)
        if stdout:
            fileopt.write_file(
                os.path.join(self.full_outdir, "size-%s" % proc["pid"]),
                stdout)
        if stderr:
            fileopt.write_file(
                os.path.join(self.full_outdir, "size-%s.err" % proc["pid"]),
                stderr)
def run_collecting(self):
    """Fetch TiDB instance info from the API and dump it to a JSON file."""
    api_data = self.read_api()
    if not api_data:
        return
    out_name = "%s_%s-tidb-info.json" % (self.host, self.port)
    fileopt.write_file(os.path.join(self.outdir, out_name), api_data)
def run_collecting(self):
    """Fetch every known TiDB API endpoint and save each reply as JSON."""
    for name, endpoint in self.uri_map.items():
        payload = self.read_api(self.url_base + endpoint)
        if not payload:
            continue
        out_name = "%s_%s-tidb-%s.json" % (self.host, self.port, name)
        fileopt.write_file(os.path.join(self.outdir, out_name), payload)
def query_worker(self, metric):
    """Query `query_range` for one metric and write the raw reply to disk.

    NOTE(review): success is detected by sniffing the word 'success' in the
    first 20 bytes of the response body — assumes the JSON status field
    appears at the front; confirm against the Prometheus response shape.
    """
    url = '%s/query_range?query=%s&start=%s&end=%s&step=%s' % (
        self.url_base, metric, self.start_time, self.end_time,
        self.resolution)
    body = util.read_url(url)[0]
    head = body[:20].decode('utf-8')
    if 'success' not in head:
        logging.error("Error querying for key '%s'." % metric)
        logging.debug("Output is:\n%s" % body)
        return
    metric_filename = '%s_%s_to_%s_%ss.json' % (
        metric, self.start_time, self.end_time, self.resolution)
    fileopt.write_file(os.path.join(self.outdir, metric_filename), body)
    logging.debug("Saved data for key '%s'." % metric)
def save_sysconf(self):
    """Save kernel tunables (`sysctl -a`) and /etc/security/limits.conf."""
    cmd = ["sysctl", "-a"]
    path_limit_file = "/etc/security/limits.conf"
    # save limits.conf
    self.save_to_dir(srcfile=path_limit_file)
    # save `sysctl -a`
    stdout, stderr = util.run_cmd(cmd)
    if stdout:
        fileopt.write_file(os.path.join(self.outdir, "sysctl.conf"), stdout)
    if stderr:
        # This stderr comes from `sysctl -a`, not from reading limits.conf
        # (the original message misattributed it), and logging.warn() is
        # deprecated in favor of logging.warning().
        logging.warning("Command '%s' returned error: %s" % (cmd, stderr))
def get_lsof_tidb(self):
    """Save `lsof` output (and errors) for each tracked process.

    Requires root privilege; bails out early otherwise.
    """
    # lsof requires root privilege
    if not util.is_root_privilege():
        logging.fatal("It's required to run lsof with root privilege.")
        return
    for proc in self.collector_data["proc_stats"]:
        stdout, stderr = lsof.lsof(proc["pid"])
        if stdout:
            # Format the pid into the filename before joining — the original
            # applied `%` to the already-joined path, inconsistent with the
            # .err filename below.
            fileopt.write_file(
                os.path.join(self.full_outdir, "lsof-%s" % proc["pid"]),
                stdout)
        if stderr:
            fileopt.write_file(
                os.path.join(self.full_outdir, "lsof-%s.err" % proc["pid"]),
                stderr)
def collector(self, args):
    """Run the bundled `collector` binary and save its JSON output.

    With `args.pid` (or a port resolved to PIDs) only per-process info is
    collected; otherwise the collector runs with no arguments. Parsed
    results are kept in `self.collector_data` and also written as one
    .json file per top-level key.
    """
    # call `collector` and store data to output dir
    base_dir = os.path.join(util.pwd(), "../")
    collector_exec = os.path.join(base_dir, "bin/collector")
    collector_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "collector"))
    if args.pid:
        logging.debug("Collecting process info only for PID %s" % args.pid)
        collector_exec = [collector_exec, '-proc', '-pid', '%s' % args.pid]
    elif args.port:
        protocol = 'UDP' if args.udp else 'TCP'
        pids = ','.join(
            str(_pid) for _pid in proc_meta.find_process_by_port(
                args.port, protocol))
        logging.debug("Collecting process info for PIDs %s" % pids)
        collector_exec = [collector_exec, '-proc', '-pid', '%s' % pids]
    # else call collector without any argument
    stdout, stderr = util.run_cmd(collector_exec)
    if stderr:
        # The original `"collector output:" % str(stderr)` raised TypeError:
        # the format string had no placeholder for the argument.
        logging.info("collector output: %s" % str(stderr))
    try:
        self.collector_data = json.loads(stdout)
    except json.JSONDecodeError:
        logging.critical("Error collecting system info:\n%s" % stderr)
        return
    # save various info to separate .json files
    for k, v in self.collector_data.items():
        # This is a dirty hack to omit empty results, until Go fix that upstream,
        # see: https://github.com/golang/go/issues/11939
        if (args.pid or args.port) and k in ['sysinfo', 'ntp']:
            continue
        if not v:  # `not v` already covers len(v) < 1
            logging.debug("Skipped empty result %s:%s" % (k, v))
            continue
        fileopt.write_file(os.path.join(collector_outdir, "%s.json" % k),
                           json.dumps(v, indent=2))
def collector(self):
    """Run the bundled `collector` binary and save its full JSON output.

    Parsed results are kept in `self.collector_data` and also written as
    one .json file per top-level key.
    """
    # TODO: warn on non-empty output dir
    # call `collector` and store data to output dir
    base_dir = os.path.join(util.pwd(), "../")
    collector_exec = os.path.join(base_dir, "bin/collector")
    collector_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "collector"))
    stdout, stderr = util.run_cmd(collector_exec)
    if stderr:
        # The original `"collector output:" % str(stderr)` raised TypeError:
        # the format string had no placeholder for the argument.
        logging.info("collector output: %s" % str(stderr))
    try:
        self.collector_data = json.loads(stdout)
    except json.JSONDecodeError:
        logging.critical("Error collecting system info:\n%s" % stderr)
        return
    # save various info to separate .json files
    for k, v in self.collector_data.items():
        fileopt.write_file(os.path.join(collector_outdir, "%s.json" % k),
                           json.dumps(v, indent=2))
def run_vmtouch(self, args):
    """Collect page-cache residency of `args.target` via the bundled vmtouch.

    Skips silently unless the active subcommand is "vmtouch" and a target
    path was given; on any vmtouch stderr the output is discarded.
    """
    if args.subcmd_runtime != "vmtouch":
        # typo fix: original logged "Ingoring"
        logging.debug("Ignoring collecting of vmtouch data.")
        return
    if not args.target:
        return
    base_dir = os.path.join(util.pwd(), "../")
    vmtouch_exec = os.path.join(base_dir, "bin/vmtouch")
    vmtouch_outdir = fileopt.create_dir(
        os.path.join(self.full_outdir, "vmtouch"))
    if not vmtouch_outdir:
        return
    stdout, stderr = util.run_cmd([vmtouch_exec, "-v", args.target])
    if stderr:
        logging.info("vmtouch output: %s" % str(stderr))
        return
    # millisecond timestamp keeps repeated runs from clobbering each other;
    # make the float->int truncation explicit instead of relying on %d.
    fileopt.write_file(
        os.path.join(
            vmtouch_outdir,
            "%s_%d.txt" % (args.target.replace("/", "_"),
                           int(time.time() * 1000))), str(stdout))
def run_collecting(self):
    """Collect PD health, diagnose and runtime info, saving each as JSON."""
    prefix = "%s_%s" % (self.host, self.port)

    health = self.read_health()
    if health:
        fileopt.write_file(
            os.path.join(self.outdir, "%s-health.json" % prefix), health)

    diagnose = self.read_diagnose()
    if diagnose:
        fileopt.write_file(
            os.path.join(self.outdir, "%s-diagnose.json" % prefix), diagnose)

    for name, payload in self.read_runtime_info().items():
        if not payload:
            continue
        fileopt.write_file(
            os.path.join(self.outdir, "%s-%s.json" % (prefix, name)),
            payload)