def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 4)
  dump_path = args[1]
  target_policy = args[2]
  component_name = args[3]
  depth = args[4]
  alternative_dirs_dict = {}

  policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))
  if not policy_set[target_policy].find_rule(component_name):
    sys.stderr.write("ERROR: Component %s not found in policy %s\n" %
                     (component_name, target_policy))
    return 1

  # --alternative-dirs is a ':'-separated list of 'target@host' path pairs.
  if options.alternative_dirs:
    for alternative_dir_pair in options.alternative_dirs.split(':'):
      target_path, host_path = alternative_dir_pair.split('@', 1)
      alternative_dirs_dict[target_path] = host_path
  (bucket_set, dump) = SubCommand.load_basic_files(
      dump_path, False, alternative_dirs=alternative_dirs_dict)

  ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
                        component_name, int(depth), sys.stdout)
  return 0

def _set_up(self, sys_argv):
  options, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  shared_first_dump_paths = args[2:]
  alternative_dirs_dict = {}
  if options.alternative_dirs:
    for alternative_dir_pair in options.alternative_dirs.split(':'):
      target_path, host_path = alternative_dir_pair.split('@', 1)
      alternative_dirs_dict[target_path] = host_path
  (bucket_set, dumps) = SubCommand.load_basic_files(
      dump_path, True, alternative_dirs=alternative_dirs_dict)

  self._timestamp = options.timestamp

  pfn_counts_dict = {}
  for shared_first_dump_path in shared_first_dump_paths:
    shared_dumps = SubCommand._find_all_dumps(shared_first_dump_path)
    for shared_dump in shared_dumps:
      pfn_counts = PFNCounts.load(shared_dump)
      if pfn_counts.pid not in pfn_counts_dict:
        pfn_counts_dict[pfn_counts.pid] = []
      pfn_counts_dict[pfn_counts.pid].append(pfn_counts)

  policy_set = PolicySet.load(SubCommand._parse_policy_list(options.policy))

  return policy_set, dumps, pfn_counts_dict, bucket_set

def do(self, sys_argv, out=sys.stdout):
  _, args = self._parse_args(sys_argv, 2)
  dump_path = args[1]
  target_policy = args[2]
  (bucket_set, dumps) = SubCommand.load_basic_files(dump_path, True)
  policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))

  MapCommand._output(dumps, bucket_set, policy_set[target_policy], out)
  return 0

def do(self, sys_argv):
  _, args = self._parse_args(sys_argv, 4)
  dump_path = args[1]
  target_policy = args[2]
  component_name = args[3]
  depth = args[4]
  (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)
  policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))

  ExpandCommand._output(dump, policy_set[target_policy], bucket_set,
                        component_name, int(depth), sys.stdout)
  return 0

def do(self, sys_argv):
  _, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)

  StacktraceCommand._output(dump, bucket_set, sys.stdout)
  return 0

def do(self, sys_argv, out=sys.stdout):
  _, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  (bucket_set, _) = SubCommand.load_basic_files(dump_path, True)

  BucketsCommand._output(bucket_set, out)
  return 0

def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  # TODO(dmikurube): Support shared memory.
  alternative_dirs_dict = {}
  if options.alternative_dirs:
    for alternative_dir_pair in options.alternative_dirs.split(':'):
      target_path, host_path = alternative_dir_pair.split('@', 1)
      alternative_dirs_dict[target_path] = host_path
  (bucket_set, dumps) = SubCommand.load_basic_files(
      dump_path, True, alternative_dirs=alternative_dirs_dict)

  json_root = OrderedDict()
  json_root['version'] = 1
  json_root['run_id'] = None
  for dump in dumps:
    if json_root['run_id'] and json_root['run_id'] != dump.run_id:
      LOGGER.error('Inconsistent heap profile dumps.')
      json_root['run_id'] = ''
      break
    json_root['run_id'] = dump.run_id
  json_root['snapshots'] = []

  # Load all sorters.
  sorters = SorterSet()

  for dump in dumps:
    json_root['snapshots'].append(
        self._fill_snapshot(dump, bucket_set, sorters))

  if options.indent:
    json.dump(json_root, sys.stdout, indent=2)
  else:
    json.dump(json_root, sys.stdout)
  print ''

def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 2)
  dump_path = args[1]
  target_policy = args[2]
  component = options.component
  (bucket_set, dump) = SubCommand.load_basic_files(dump_path, False)
  policy_set = PolicySet.load(SubCommand._parse_policy_list(target_policy))

  with open(SubCommand._find_prefix(dump_path) + '.maps', 'r') as maps_f:
    maps_lines = maps_f.readlines()
  PProfCommand._output(dump, policy_set[target_policy], bucket_set,
                       maps_lines, component, sys.stdout)
  return 0

def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  # TODO(dmikurube): Support shared memory.
  alternative_dirs_dict = {}
  if options.alternative_dirs:
    for alternative_dir_pair in options.alternative_dirs.split(':'):
      target_path, host_path = alternative_dir_pair.split('@', 1)
      alternative_dirs_dict[target_path] = host_path
  (bucket_set, dumps) = SubCommand.load_basic_files(
      dump_path, True, alternative_dirs=alternative_dirs_dict)

  # Load all sorters.
  sorters = SorterSet()

  json_root = OrderedDict()
  json_root['version'] = 1
  json_root['run_id'] = None
  for dump in dumps:
    if json_root['run_id'] and json_root['run_id'] != dump.run_id:
      LOGGER.error('Inconsistent heap profile dumps.')
      json_root['run_id'] = ''
      break
    json_root['run_id'] = dump.run_id
  json_root['roots'] = []
  for sorter in sorters:
    if sorter.root:
      json_root['roots'].append([sorter.world, sorter.name])
  json_root['default_template'] = 'l2'
  json_root['templates'] = sorters.templates.as_dict()

  orders = OrderedDict()
  orders['worlds'] = OrderedDict()
  for world in ['vm', 'malloc']:
    orders['worlds'][world] = OrderedDict()
    orders['worlds'][world]['breakdown'] = OrderedDict()
    for sorter in sorters.iter_world(world):
      order = []
      for rule in sorter.iter_rule():
        if rule.name not in order:
          order.append(rule.name)
      orders['worlds'][world]['breakdown'][sorter.name] = order
  json_root['orders'] = orders

  json_root['snapshots'] = []
  for dump in dumps:
    LOGGER.info('Sorting a dump %s...' % dump.path)
    json_root['snapshots'].append(
        self._fill_snapshot(dump, bucket_set, sorters))

  if options.indent:
    json.dump(json_root, sys.stdout, indent=2)
  else:
    json.dump(json_root, sys.stdout)
  print ''

def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 1)
  dump_path = args[1]
  # TODO(dmikurube): Support shared memory.
  alternative_dirs_dict = {}
  if options.alternative_dirs:
    for alternative_dir_pair in options.alternative_dirs.split(":"):
      target_path, host_path = alternative_dir_pair.split("@", 1)
      alternative_dirs_dict[target_path] = host_path
  (bucket_set, dumps) = SubCommand.load_basic_files(
      dump_path, True, alternative_dirs=alternative_dirs_dict)

  # Load all sorters.
  sorters = SorterSet()

  json_root = OrderedDict()
  json_root["version"] = 1
  json_root["run_id"] = None
  json_root["roots"] = []
  for sorter in sorters:
    if sorter.root:
      json_root["roots"].append([sorter.world, sorter.name])
  json_root["default_template"] = "l2"
  json_root["templates"] = sorters.templates.as_dict()

  orders = OrderedDict()
  orders["worlds"] = OrderedDict()
  for world in ["vm", "malloc"]:
    orders["worlds"][world] = OrderedDict()
    orders["worlds"][world]["breakdown"] = OrderedDict()
    for sorter in sorters.iter_world(world):
      order = []
      for rule in sorter.iter_rule():
        if rule.name not in order:
          order.append(rule.name)
      orders["worlds"][world]["breakdown"][sorter.name] = order
  json_root["orders"] = orders

  json_root["snapshots"] = []
  for dump in dumps:
    if json_root["run_id"] is None:
      json_root["run_id"] = dump.run_id
    elif json_root["run_id"] != dump.run_id:
      # Keep the empty run_id marker once an inconsistency has been found;
      # don't let a later dump overwrite it.
      LOGGER.error("Inconsistent heap profile dumps.")
      json_root["run_id"] = ""
    LOGGER.info("Sorting a dump %s..." % dump.path)
    json_root["snapshots"].append(
        self._fill_snapshot(dump, bucket_set, sorters))

  if options.indent:
    json.dump(json_root, sys.stdout, indent=2)
  else:
    json.dump(json_root, sys.stdout)
  print ""

def do(self, sys_argv):
  options, args = self._parse_args(sys_argv, 2)
  dump_path = args[1]
  gs_path = args[2]

  dump_files = SubCommand._find_all_dumps(dump_path)
  bucket_files = SubCommand._find_all_buckets(dump_path)
  prefix = SubCommand._find_prefix(dump_path)
  symbol_data_sources = SymbolDataSources(prefix)
  symbol_data_sources.prepare()
  symbol_path = symbol_data_sources.path()

  handle_zip, filename_zip = tempfile.mkstemp('.zip', 'dmprof')
  os.close(handle_zip)

  try:
    # Pack dump files, bucket files and the prepared symbol directory into
    # a temporary zip, then upload it with 'gsutil cp'.
    file_zip = zipfile.ZipFile(filename_zip, 'w', zipfile.ZIP_DEFLATED)
    for filename in dump_files:
      file_zip.write(filename, os.path.basename(os.path.abspath(filename)))
    for filename in bucket_files:
      file_zip.write(filename, os.path.basename(os.path.abspath(filename)))

    symbol_basename = os.path.basename(os.path.abspath(symbol_path))
    for filename in os.listdir(symbol_path):
      if not filename.startswith('.'):
        file_zip.write(os.path.join(symbol_path, filename),
                       os.path.join(symbol_basename, os.path.basename(
                           os.path.abspath(filename))))
    file_zip.close()

    returncode = UploadCommand._run_gsutil(
        options.gsutil, 'cp', '-a', 'public-read', filename_zip, gs_path)
  finally:
    os.remove(filename_zip)

  return returncode