def evaluate_delta(args):
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs to evaluate-delta")
    (old, new) = args.remainder
    vargs = vars_of_args(args)
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)
    # Build an eval environment mapping each non-timer stat's trailing
    # identifier to its delta between the two stats-dirs.
    env = {}
    ident = re.compile(r'(\w+)$')
    for r in compare_stats(args, old_stats.stats, new_stats.stats):
        if r.name.startswith("time.") or '.time.' in r.name:
            continue
        m = re.search(ident, r.name)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, r.delta))
            env[i] = r.delta
    try:
        if eval(args.evaluate_delta, env):
            return 0
        else:
            print("evaluate-delta condition failed: '%s'" %
                  args.evaluate_delta)
            return 1
    except Exception as e:
        print(e)
        return 1
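# `vars_of_args` is referenced throughout but not defined in this excerpt.
# A minimal sketch of what it plausibly does -- reduce the argparse
# namespace to just the keyword arguments that load_stats_dir and
# merge_all_jobstats accept (key names inferred from the explicit call
# sites elsewhere in this file); the real helper may do more:
def _vars_of_args_sketch(args):
    vargs = vars(args)
    keys = ('select_module', 'select_stat',
            'exclude_timers', 'group_by_module')
    return dict((k, vargs[k]) for k in keys if k in vargs)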
def load_paired_stats_dirs(args):
    assert len(args.remainder) == 2
    paired_stats = []
    mod = args.select_module
    stat = args.select_stat
    xt = args.exclude_timers
    (old, new) = args.remainder
    for p in sorted(os.listdir(old)):
        full_old = os.path.join(old, p)
        full_new = os.path.join(new, p)
        if not (os.path.exists(full_old) and os.path.isdir(full_old) and
                os.path.exists(full_new) and os.path.isdir(full_new)):
            continue
        old_stats = load_stats_dir(full_old, select_module=mod,
                                   select_stat=stat, exclude_timers=xt)
        new_stats = load_stats_dir(full_new, select_module=mod,
                                   select_stat=stat, exclude_timers=xt)
        if len(old_stats) == 0 or len(new_stats) == 0:
            continue
        paired_stats.append((p, (old_stats, new_stats)))
    return paired_stats
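# Quick self-contained demo of the pairing rule above: identically named
# subdirectories of the two trees are paired, and anything present on only
# one side is skipped. (Directory names here are hypothetical.)
def _demo_pairing():
    import os
    import tempfile
    root = tempfile.mkdtemp()
    for side in ('old', 'new'):
        os.makedirs(os.path.join(root, side, 'ModA'))
    os.makedirs(os.path.join(root, 'old', 'OnlyInOld'))
    old = os.path.join(root, 'old')
    new = os.path.join(root, 'new')
    pairs = [p for p in sorted(os.listdir(old))
             if os.path.isdir(os.path.join(old, p)) and
             os.path.isdir(os.path.join(new, p))]
    assert pairs == ['ModA']  # 'OnlyInOld' has no counterpart; skipped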
def compare_stats_dirs(args):
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")
    vargs = vars_of_args(args)
    (old, new) = args.remainder
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)
    return write_comparison(args, old_stats.stats, new_stats.stats)
def compare_stats_dirs(args):
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")
    (old, new) = args.remainder
    old_stats = merge_all_jobstats(
        load_stats_dir(old, select_module=args.select_module),
        select_module=args.select_module,
        group_by_module=args.group_by_module)
    new_stats = merge_all_jobstats(
        load_stats_dir(new, select_module=args.select_module),
        select_module=args.select_module,
        group_by_module=args.group_by_module)
    return write_comparison(args, old_stats.stats, new_stats.stats)
def evaluate(args):
    if len(args.remainder) != 1:
        raise ValueError("Expected exactly 1 stats-dir to evaluate against")
    d = args.remainder[0]
    vargs = vars_of_args(args)
    merged = merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
    env = {}
    ident = re.compile(r'(\w+)$')
    for (k, v) in merged.stats.items():
        if k.startswith("time.") or '.time.' in k:
            continue
        m = re.search(ident, k)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, v))
            env[i] = v
    try:
        if eval(args.evaluate, env):
            return 0
        else:
            print("evaluate condition failed: '%s'" % args.evaluate)
            return 1
    except Exception as e:
        print(e)
        return 1
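# The environment built above maps the trailing identifier of each
# non-timer stat to its value, so a counter named e.g.
# 'Driver.NumDriverJobsRun' is exposed to the condition expression as
# plain 'NumDriverJobsRun', allowing conditions along the lines of
# args.evaluate = 'NumDriverJobsRun < 10' (the counter name here is
# illustrative). A small demo of the extraction:
def _demo_ident_extraction():
    import re
    m = re.search(r'(\w+)$', 'Driver.NumDriverJobsRun')
    assert m.groups()[0] == 'NumDriverJobsRun'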
def load_paired_stats_dirs(args):
    assert len(args.remainder) == 2
    paired_stats = []
    (old, new) = args.remainder
    for p in sorted(os.listdir(old)):
        full_old = os.path.join(old, p)
        full_new = os.path.join(new, p)
        if not (os.path.exists(full_old) and os.path.isdir(full_old) and
                os.path.exists(full_new) and os.path.isdir(full_new)):
            continue
        old_stats = load_stats_dir(full_old, **vars(args))
        new_stats = load_stats_dir(full_new, **vars(args))
        if len(old_stats) == 0 or len(new_stats) == 0:
            continue
        paired_stats.append((p, (old_stats, new_stats)))
    return paired_stats
def write_lnt_values(args):
    for d in args.remainder:
        stats = load_stats_dir(d,
                               select_module=args.select_module,
                               select_stat=args.select_stat,
                               exclude_timers=args.exclude_timers)
        merged = merge_all_jobstats(stats,
                                    select_module=args.select_module,
                                    group_by_module=args.group_by_module)
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print "\nsubmitting to LNT server: " + url
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = urllib2.urlopen(urllib2.Request(url, data))
            response = json.loads(response_str.read())
            print "### response:"
            print response
            if 'success' in response:
                print "server response:\tSuccess"
            else:
                print "server response:\tError"
                print "error:\t", response['error']
                sys.exit(1)
def set_csv_baseline(args):
    existing = None
    if os.path.exists(args.set_csv_baseline):
        with open(args.set_csv_baseline, "r") as f:
            existing = read_stats_dict_from_csv(f)
            print("updating %d baseline entries in %s" %
                  (len(existing), args.set_csv_baseline))
    else:
        print "making new baseline " + args.set_csv_baseline
    fieldnames = ["epoch", "name", "value"]
    with open(args.set_csv_baseline, "wb") as f:
        out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        sel = args.select_module
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, select_module=sel)),
                               select_module=sel,
                               group_by_module=args.group_by_module)
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                if name not in existing:
                    continue
                (epoch, value, chg) = update_epoch_value(existing, name,
                                                         epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch),
                              name=name,
                              value=int(value)))
        if existing is not None:
            print "changed %d entries in baseline" % changed
    return 0
def write_catapult_trace(args):
    allstats = []
    for path in args.remainder:
        allstats += load_stats_dir(path,
                                   select_module=args.select_module,
                                   select_stat=args.select_stat,
                                   exclude_timers=args.exclude_timers)
    json.dump([s.to_catapult_trace_obj() for s in allstats], args.output)
def write_catapult_trace(args):
    allstats = []
    vargs = vars_of_args(args)
    for path in args.remainder:
        allstats += load_stats_dir(path, **vargs)
    # Renumber jobs in start-time order so the rows of the emitted trace
    # are stable across runs.
    allstats.sort(key=attrgetter('start_usec'))
    for i in range(len(allstats)):
        allstats[i].jobid = i
    json.dump([s.to_catapult_trace_obj() for s in allstats], args.output)
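# The dumped JSON is Chrome/Catapult trace-event format, viewable in
# chrome://tracing. to_catapult_trace_obj() is defined elsewhere, but a
# complete-event record in that format has this shape (values here are
# illustrative, not taken from the real method):
_example_trace_event = {
    'name': 'frontend job',  # label of the slice on the timeline
    'ph': 'X',               # 'X' marks a complete event with a duration
    'pid': 1,                # row grouping: process id...
    'tid': 1,                # ...and thread id within it
    'ts': 0,                 # start time, in microseconds
    'dur': 1000,             # duration, in microseconds
}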
def compare_stats_dirs(args):
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")
    vargs = vars(args)
    if args.select_stats_from_csv_baseline is not None:
        b = read_stats_dict_from_csv(args.select_stats_from_csv_baseline)
        # Sadly the baseline names may be of a different form than the
        # names in the stats-dirs: strip any module prefix when the
        # baseline was grouped by module.
        if args.group_by_module:
            pat = re.compile(r'^\w+\.')
            vargs['select_stat'] = set(re.sub(pat, '', k) for k in b.keys())
        else:
            vargs['select_stat'] = b.keys()
    (old, new) = args.remainder
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)
    return write_comparison(args, old_stats.stats, new_stats.stats)
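# Demo of the prefix-stripping above (the counter and module names are
# illustrative): keys written under group_by_module carry a leading
# 'Module.' prefix, and names without a prefix pass through unchanged.
def _demo_strip_module_prefix():
    import re
    assert re.sub(r'^\w+\.', '', 'MyModule.NumIRInsts') == 'NumIRInsts'
    assert re.sub(r'^\w+\.', '', 'NumIRInsts') == 'NumIRInsts'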
def compare_to_csv_baseline(args):
    old_stats = read_stats_dict_from_csv(args.compare_to_csv_baseline)
    m = merge_all_jobstats((s for d in args.remainder
                            for s in load_stats_dir(d, **vars(args))),
                           **vars(args))
    old_stats = dict((k, v) for (k, (_, v)) in old_stats.items())
    new_stats = m.stats
    return write_comparison(args, old_stats, new_stats)
def compare_to_csv_baseline(args):
    old_stats = read_stats_dict_from_csv(args.compare_to_csv_baseline,
                                         select_stat=args.select_stat)
    m = merge_all_jobstats((s for d in args.remainder
                            for s in load_stats_dir(d, **vars(args))),
                           **vars(args))
    old_stats = dict((k, v) for (k, (_, v)) in old_stats.items())
    new_stats = m.stats
    return write_comparison(args, old_stats, new_stats)
def compare_to_csv_baseline(args):
    old_stats = read_stats_dict_from_csv(args.compare_to_csv_baseline)
    sel = args.select_module
    m = merge_all_jobstats((s for d in args.remainder
                            for s in load_stats_dir(d, select_module=sel)),
                           select_module=sel,
                           group_by_module=args.group_by_module)
    old_stats = dict((k, v) for (k, (_, v)) in old_stats.items())
    new_stats = m.stats
    return write_comparison(args, old_stats, new_stats)
def compare_to_csv_baseline(args):
    vargs = vars_of_args(args)
    with io.open(args.compare_to_csv_baseline, 'r', encoding='utf-8') as f:
        old_stats = read_stats_dict_from_csv(f,
                                             select_stat=vargs['select_stat'])
    m = merge_all_jobstats((s for d in args.remainder
                            for s in load_stats_dir(d, **vargs)),
                           **vargs)
    # Baseline entries are (epoch, value) pairs; compare on values only.
    old_stats = dict((k, v) for (k, (_, v)) in old_stats.items())
    new_stats = m.stats
    return write_comparison(args, old_stats, new_stats)
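# The baseline file read here is the tab-separated CSV that
# set_csv_baseline emits: one (epoch, name, value) row per stat, with
# non-numeric fields quoted and no header row. For example (counter names
# and numbers are illustrative):
#
#   1540000000	"Driver.NumDriverJobsRun"	8
#   1540000000	"Frontend.NumSourceLines"	1024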
def show_incrementality(args):
    fieldnames = ["incrementality", "name"]
    out = csv.DictWriter(args.output, fieldnames, dialect='excel-tab')
    out.writeheader()
    for path in args.remainder:
        stats = load_stats_dir(path, **vars(args))
        for s in stats:
            if s.is_driver_job():
                pct = s.incrementality_percentage()
                out.writerow(dict(name=os.path.basename(path),
                                  incrementality=pct))
def set_csv_baseline(args):
    existing = None
    vargs = vars_of_args(args)
    if os.path.exists(args.set_csv_baseline):
        with io.open(args.set_csv_baseline, "r",
                     encoding='utf-8', newline='\n') as f:
            ss = vargs['select_stat']
            existing = read_stats_dict_from_csv(f, select_stat=ss)
            print("updating %d baseline entries in %s" %
                  (len(existing), args.set_csv_baseline))
    else:
        print("making new baseline " + args.set_csv_baseline)
    fieldnames = ["epoch", "name", "value"]

    def _open(path):
        # The csv module wants binary mode on Python 2 but text mode
        # (with newline control) on Python 3.
        if sys.version_info[0] < 3:
            return open(path, 'wb')
        return io.open(path, "w", encoding='utf-8', newline='\n')

    with _open(args.set_csv_baseline) as f:
        out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, **vargs)),
                               **vargs)
        if m is None:
            print("no stats found")
            return 1
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                if name not in existing:
                    continue
                (epoch, value, chg) = update_epoch_value(existing, name,
                                                         epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch),
                              name=name,
                              value=int(value)))
        if existing is not None:
            print("changed %d entries in baseline" % changed)
    return 0
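# `update_epoch_value` is not part of this excerpt. Judging only from its
# call site -- `existing` maps name -> (epoch, value), and the call returns
# (epoch, value, chg) with chg summed into a change counter -- a plausible
# minimal stand-in might look like the sketch below. This is an assumption
# about its behavior, not the real implementation:
def _update_epoch_value_sketch(existing, name, epoch, value):
    (old_epoch, old_value) = existing[name]
    if value == old_value:
        # Value unchanged: keep the baseline entry's original timestamp.
        return (old_epoch, old_value, 0)
    # Value changed: re-stamp with the new epoch and count one change.
    return (epoch, value, 1)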
def write_lnt_values(args):
    for d in args.remainder:
        stats = load_stats_dir(d, **vars(args))
        merged = merge_all_jobstats(stats, **vars(args))
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print "\nsubmitting to LNT server: " + url
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = urllib2.urlopen(urllib2.Request(url, data))
            response = json.loads(response_str.read())
            print "### response:"
            print response
            if 'success' in response:
                print "server response:\tSuccess"
            else:
                print "server response:\tError"
                print "error:\t", response['error']
                sys.exit(1)
def set_csv_baseline(args):
    existing = None
    vargs = vars_of_args(args)
    if os.path.exists(args.set_csv_baseline):
        with open(args.set_csv_baseline, "r") as f:
            ss = vargs['select_stat']
            existing = read_stats_dict_from_csv(f, select_stat=ss)
            print("updating %d baseline entries in %s" %
                  (len(existing), args.set_csv_baseline))
    else:
        print "making new baseline " + args.set_csv_baseline
    fieldnames = ["epoch", "name", "value"]
    with open(args.set_csv_baseline, "wb") as f:
        out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, **vargs)),
                               **vargs)
        if m is None:
            print "no stats found"
            return 1
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                if name not in existing:
                    continue
                (epoch, value, chg) = update_epoch_value(existing, name,
                                                         epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch),
                              name=name,
                              value=int(value)))
        if existing is not None:
            print "changed %d entries in baseline" % changed
    return 0
def write_lnt_values(args):
    vargs = vars_of_args(args)
    for d in args.remainder:
        stats = load_stats_dir(d, **vargs)
        merged = merge_all_jobstats(stats, **vargs)
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print("\nsubmitting to LNT server: " + url)
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = URLOpen(Request(url, data))
            response = json.loads(response_str.read())
            print("### response:")
            print(response)
            if 'success' in response:
                print("server response:\tSuccess")
            else:
                print("server response:\tError")
                print("error:\t", response['error'])
                sys.exit(1)
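# URLOpen and Request above are presumably Python-2/3 compatibility
# aliases imported elsewhere in the file; a sketch of the kind of shim
# that would provide them (an assumption -- the real import block is
# outside this excerpt):
try:
    # Python 3
    from urllib.request import urlopen as URLOpen
    from urllib.request import Request
except ImportError:
    # Python 2
    from urllib2 import urlopen as URLOpen
    from urllib2 import Request
# Note that `urllib.urlencode` as called above is the Python 2 spelling;
# Python 3 moved it to urllib.parse.urlencode.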
def write_catapult_trace(args):
    allstats = []
    vargs = vars_of_args(args)
    for path in args.remainder:
        allstats += load_stats_dir(path, **vargs)
    json.dump([s.to_catapult_trace_obj() for s in allstats], args.output)
def write_catapult_trace(args):
    allstats = []
    for path in args.remainder:
        allstats += load_stats_dir(path, **vars(args))
    json.dump([s.to_catapult_trace_obj() for s in allstats], args.output)