def __str__(self):
    """Render a multi-line, human-readable hardware description of this node.

    Builds a report from attributes collected elsewhere (summary, system
    name, motherboard, RAM, CPU cores, storage controllers/devices, network
    adapters).  For each section that could not be collected, an explicit
    "Failed to get ..." line is emitted instead of silently skipping it.
    """
    res = []

    # Overall totals first.  NOTE: fixed typo in the heading ("Simmary").
    summ = self.get_summary()
    summary = "Summary: {cores} cores, {ram}B RAM, {disk}B storage"
    res.append(summary.format(cores=summ['cores'],
                              ram=utils.b2ssize(summ['ram']),
                              disk=utils.b2ssize(summ['storage'])))

    res.append(str(self.sys_name))

    if self.mb is not None:
        res.append("Motherboard: " + self.mb)

    # ram_size == 0 is the sentinel for "could not detect RAM".
    if self.ram_size == 0:
        res.append("RAM: Failed to get RAM size")
    else:
        res.append("RAM " + utils.b2ssize(self.ram_size) + "B")

    # cores is a list of (model_name, count) pairs.
    if not self.cores:
        res.append("CPU cores: Failed to get CPU info")
    else:
        res.append("CPU cores:")
        for name, count in self.cores:
            if count > 1:
                res.append(" {0} * {1}".format(count, name))
            else:
                res.append(" " + name)

    if self.storage_controllers:
        res.append("Disk controllers:")
        for descr in self.storage_controllers:
            res.append(" " + descr)

    # disks_info maps device -> (model, size_in_bytes).
    if self.disks_info:
        res.append("Storage devices:")
        for dev, (model, size) in sorted(self.disks_info.items()):
            ssize = utils.b2ssize(size) + "B"
            res.append(" {0} {1} {2}".format(dev, ssize, model))
    else:
        # NOTE: fixed typo in the failure message ("devices's").
        res.append("Storage devices: Failed to get info")

    # disks_raw_info maps device -> raw description string.
    if self.disks_raw_info:
        res.append("Disks devices:")
        for dev, descr in sorted(self.disks_raw_info.items()):
            res.append(" {0} {1}".format(dev, descr))
    else:
        res.append("Disks devices: Failed to get info")

    # net_info maps adapter name -> (speed, duplex_type, _unused).
    if self.net_info:
        res.append("Net adapters:")
        for name, (speed, dtype, _) in self.net_info.items():
            res.append(" {0} {2} duplex={1}".format(name, dtype, speed))
    else:
        res.append("Net adapters: Failed to get net info")

    return str(self.hostname) + ":\n" + "\n".join(" " + i for i in res)
def print_consumption(agg, min_transfer=None):
    """Format aggregated per-consumer usage as a right-aligned text table.

    *agg.all_together* maps (node_or_role, dev) -> numeric usage.  Entries
    below *min_transfer* (when given) are dropped.  Returns the rendered
    table as a string, or None when nothing remains after filtering.
    """
    totals = sorted(
        ((int(amount), node_or_role + ':' + dev)
         for (node_or_role, dev), amount in agg.all_together.items()),
        reverse=True)

    if min_transfer is not None:
        totals = [(amount, label) for amount, label in totals
                  if amount >= min_transfer]

    if not totals:
        return None

    # Humanize the numeric column, keeping the sorted order.
    rows = [(label, b2ssize(amount) + "B") for amount, label in totals]

    label_width = max(len(label) for label, _ in rows)
    value_width = max(len(value) for _, value in rows)
    row_fmt = " {{0:>{0}}} | {{1:>{1}}} ".format(label_width, value_width)

    lines = [row_fmt.format("Component", "Usage")]
    for label, value in rows:
        lines.append(row_fmt.format(label, value))

    return "\n".join(lines)
def main(argv):
    # Entry point: load per-node sensor CSV dumps, aggregate resource
    # consumption per test interval, and print a per-consumer usage table
    # plus bottleneck information for every selected test.
    opts = parse_args(argv)
    stor_dir = os.path.join(opts.results_folder, 'sensor_storage')
    data = {}
    source_id2hostname = {}

    csv_files = os.listdir(stor_dir)
    for fname in csv_files:
        # File names are expected to look like "<run>_<id>.csv".
        # NOTE(review): the dot before "csv" is unescaped, so it matches any
        # character; also `assert` disappears under `python -O` — confirm.
        assert re.match(r"\d+_\d+.csv$", fname)

    # Process files in run order (numeric prefix before the underscore).
    csv_files.sort(key=lambda x: int(x.split('_')[0]))

    for fname in csv_files:
        with open(os.path.join(stor_dir, fname)) as fd:
            for name, node_sens_data in load_results_csv(fd).items():
                if name in data:
                    # Sensor already seen in an earlier file: metadata must
                    # match, value rows are concatenated.
                    assert data[name].hostname == node_sens_data.hostname
                    assert data[name].source_id == node_sens_data.source_id
                    assert data[name].headers == node_sens_data.headers
                    data[name].values.extend(node_sens_data.values)
                else:
                    data[name] = node_sens_data

    # Build source_id -> hostname map; source ids must be unique.
    for nd in data.values():
        assert nd.source_id not in source_id2hostname
        source_id2hostname[nd.source_id] = nd.hostname
        nd.finalize()

    roles_file = os.path.join(opts.results_folder, 'nodes.yaml')
    # NOTE(review): file handle from open() is never closed, and yaml.load
    # without a Loader is unsafe on untrusted input — consider
    # `with open(...)` + yaml.safe_load.
    src2roles = yaml.load(open(roles_file))

    timings = load_test_timings(opts.results_folder)

    roles_map = make_roles_mapping(src2roles, source_id2hostname)
    # opts.max_diff is in milliseconds; convert to a fractional coefficient.
    max_diff = float(opts.max_diff) / 1000

    fields = ('recv_bytes', 'send_bytes', 'sectors_read', 'sectors_written',
              'reads_completed', 'writes_completed')

    if opts.fields != []:
        # Restrict output to the user-selected subset, preserving order.
        fields = [field for field in fields if field in opts.fields]

    for test_name, intervals in sorted(timings.items()):
        if opts.select_loads != []:
            if test_name not in opts.select_loads:
                continue

        data_chunks = get_data_for_intervals(data, intervals)
        consumption = total_consumption(data_chunks, roles_map)
        bottlenecks = print_bottlenecks(data_chunks)

        # Noise thresholds: values below these fractions of the test's own
        # data size / operation count are hidden in the table.
        testdata_sz = get_testdata_size(consumption) * max_diff
        testop_count = get_testop_cout(consumption) * max_diff

        per_consumer_table = {}
        per_consumer_table_str = {}

        all_consumers = set()  # consumption.values()[0].all_together)
        for value in consumption.values():
            all_consumers = all_consumers | set(value.all_together)
        fields = [field for field in fields if field in consumption]

        all_consumers_sum = []

        for consumer in all_consumers:
            # tb_str holds humanized cells, tb raw ints (for plotting).
            tb_str = per_consumer_table_str[consumer] = []
            tb = per_consumer_table[consumer] = []
            vl = 0
            for name in fields:
                val = consumption[name].all_together[consumer]
                if SINFO_MAP[name].to_bytes_coef is None:
                    # Counter-style sensor (operation counts).
                    if val < testop_count:
                        tb_str.append('0')
                    else:
                        tb_str.append(b2ssize_10(int(val)))
                else:
                    # Byte-convertible sensor (sectors -> bytes etc.).
                    val = int(val) * SINFO_MAP[name].to_bytes_coef
                    if val < testdata_sz:
                        tb_str.append('-')
                    else:
                        tb_str.append(b2ssize(val) + "B")
                tb.append(int(val))
                vl += int(val)
            all_consumers_sum.append((vl, consumer))

        # Biggest consumers first.
        all_consumers_sum.sort(reverse=True)

        plot_consumption(per_consumer_table, fields, testdata_sz / max_diff)

        tt = texttable.Texttable(max_width=130)
        tt.set_cols_align(["l"] + ["r"] * len(fields))

        header = ["Name"]
        for fld in fields:
            if fld in SINFO_MAP:
                header.append(SINFO_MAP[fld].print_name)
            else:
                header.append(fld)
        tt.header(header)

        for summ, consumer in all_consumers_sum:
            if summ > 0:
                tt.add_row([":".join(consumer)] +
                           per_consumer_table_str[consumer])

        tt.set_deco(texttable.Texttable.VLINES | texttable.Texttable.HEADER)
        res = tt.draw()
        max_len = max(map(len, res.split("\n")))
        print test_name.center(max_len)
        print res
        print bottlenecks
def plot_consumption(per_consumer_table, fields, refload):
    """Build a graphviz graph of network and per-disk traffic per host.

    *per_consumer_table* maps (hostname, dev) -> list of raw values aligned
    with *fields*; *refload* is the reference load used to scale edge
    widths.  Returns the graph in dot format as a string, or None when
    pygraphviz is unavailable.
    """
    if pgv is None:
        return

    storage_sensors = ('sectors_written', 'sectors_read')
    host_map = {}

    # Collect per-device storage traffic, grouped by host.
    for (hostname, dev), consumption in per_consumer_table.items():
        host = host_map.get(hostname)
        if host is None:
            host = host_map[hostname] = Host(hostname)

        by_sensor = dict(zip(fields, consumption))
        for sensor in storage_sensors:
            amount = by_sensor.get(sensor, 0)
            if amount > 0:
                host.hdd_devs.setdefault(dev, {})[sensor] = amount

    graph = pgv.AGraph(name='system', directed=True)

    net_node = "Network"
    graph.add_node(net_node)

    in_color = 'red'
    out_color = 'green'
    threshold = 0.1  # minimum pen width for an edge to be drawn

    for host in host_map.values():
        cluster = graph.subgraph(name="cluster_" + host.name,
                                 label=host.name,
                                 color="blue")
        cluster.add_node(host.name, shape="diamond")
        graph.add_edge(host.name, net_node)
        graph.add_edge(net_node, host.name)

        for dev_name, sensors in host.hdd_devs.items():
            if dev_name == '*':
                continue

            written = sensors.get('sectors_written', 0)
            read = sensors.get('sectors_read', 0)

            # Edge widths are proportional to traffic relative to refload.
            write_pw = 7 * written / refload
            read_pw = 7 * read / refload

            if write_pw > threshold or read_pw > threshold:
                dev_fqn = host.name + "." + dev_name
                cluster.add_node(dev_fqn)

                if write_pw > threshold:
                    cluster.add_edge(host.name, dev_fqn,
                                     label=b2ssize(written) + "B",
                                     penwidth=write_pw,
                                     fontcolor=out_color,
                                     color=out_color)

                if read_pw > threshold:
                    cluster.add_edge(dev_fqn, host.name,
                                     label=b2ssize(read) + "B",
                                     penwidth=read_pw,
                                     color=in_color,
                                     fontcolor=in_color)

    return graph.string()
def main(argv):
    # Entry point: load per-node sensor CSV dumps, aggregate resource
    # consumption per test interval, and print a per-consumer usage table
    # plus bottleneck information for every selected test.
    # NOTE(review): this appears to duplicate an earlier `main` definition in
    # this file; if both live in the same module, this one shadows it —
    # confirm which is intended and remove the other.
    opts = parse_args(argv)
    stor_dir = os.path.join(opts.results_folder, 'sensor_storage')
    data = {}
    source_id2hostname = {}

    csv_files = os.listdir(stor_dir)
    for fname in csv_files:
        # File names are expected to look like "<run>_<id>.csv".
        # NOTE(review): the dot before "csv" is unescaped, so it matches any
        # character; also `assert` disappears under `python -O` — confirm.
        assert re.match(r"\d+_\d+.csv$", fname)

    # Process files in run order (numeric prefix before the underscore).
    csv_files.sort(key=lambda x: int(x.split('_')[0]))

    for fname in csv_files:
        with open(os.path.join(stor_dir, fname)) as fd:
            for name, node_sens_data in load_results_csv(fd).items():
                if name in data:
                    # Sensor already seen in an earlier file: metadata must
                    # match, value rows are concatenated.
                    assert data[name].hostname == node_sens_data.hostname
                    assert data[name].source_id == node_sens_data.source_id
                    assert data[name].headers == node_sens_data.headers
                    data[name].values.extend(node_sens_data.values)
                else:
                    data[name] = node_sens_data

    # Build source_id -> hostname map; source ids must be unique.
    for nd in data.values():
        assert nd.source_id not in source_id2hostname
        source_id2hostname[nd.source_id] = nd.hostname
        nd.finalize()

    roles_file = os.path.join(opts.results_folder, 'nodes.yaml')
    # NOTE(review): file handle from open() is never closed, and yaml.load
    # without a Loader is unsafe on untrusted input — consider
    # `with open(...)` + yaml.safe_load.
    src2roles = yaml.load(open(roles_file))

    timings = load_test_timings(opts.results_folder)

    roles_map = make_roles_mapping(src2roles, source_id2hostname)
    # opts.max_diff is in milliseconds; convert to a fractional coefficient.
    max_diff = float(opts.max_diff) / 1000

    fields = ('recv_bytes', 'send_bytes', 'sectors_read', 'sectors_written',
              'reads_completed', 'writes_completed')

    if opts.fields != []:
        # Restrict output to the user-selected subset, preserving order.
        fields = [field for field in fields if field in opts.fields]

    for test_name, intervals in sorted(timings.items()):
        if opts.select_loads != []:
            if test_name not in opts.select_loads:
                continue

        data_chunks = get_data_for_intervals(data, intervals)
        consumption = total_consumption(data_chunks, roles_map)
        bottlenecks = print_bottlenecks(data_chunks)

        # Noise thresholds: values below these fractions of the test's own
        # data size / operation count are hidden in the table.
        testdata_sz = get_testdata_size(consumption) * max_diff
        testop_count = get_testop_cout(consumption) * max_diff

        per_consumer_table = {}
        per_consumer_table_str = {}

        all_consumers = set()  # consumption.values()[0].all_together)
        for value in consumption.values():
            all_consumers = all_consumers | set(value.all_together)
        fields = [field for field in fields if field in consumption]

        all_consumers_sum = []

        for consumer in all_consumers:
            # tb_str holds humanized cells, tb raw ints (for plotting).
            tb_str = per_consumer_table_str[consumer] = []
            tb = per_consumer_table[consumer] = []
            vl = 0
            for name in fields:
                val = consumption[name].all_together[consumer]
                if SINFO_MAP[name].to_bytes_coef is None:
                    # Counter-style sensor (operation counts).
                    if val < testop_count:
                        tb_str.append('0')
                    else:
                        tb_str.append(b2ssize_10(int(val)))
                else:
                    # Byte-convertible sensor (sectors -> bytes etc.).
                    val = int(val) * SINFO_MAP[name].to_bytes_coef
                    if val < testdata_sz:
                        tb_str.append('-')
                    else:
                        tb_str.append(b2ssize(val) + "B")
                tb.append(int(val))
                vl += int(val)
            all_consumers_sum.append((vl, consumer))

        # Biggest consumers first.
        all_consumers_sum.sort(reverse=True)

        plot_consumption(per_consumer_table, fields, testdata_sz / max_diff)

        tt = texttable.Texttable(max_width=130)
        tt.set_cols_align(["l"] + ["r"] * len(fields))

        header = ["Name"]
        for fld in fields:
            if fld in SINFO_MAP:
                header.append(SINFO_MAP[fld].print_name)
            else:
                header.append(fld)
        tt.header(header)

        for summ, consumer in all_consumers_sum:
            if summ > 0:
                tt.add_row([":".join(consumer)] +
                           per_consumer_table_str[consumer])

        tt.set_deco(texttable.Texttable.VLINES | texttable.Texttable.HEADER)
        res = tt.draw()
        max_len = max(map(len, res.split("\n")))
        print test_name.center(max_len)
        print res
        print bottlenecks