示例#1
0
def evaluate_delta(args):
    """Compare two stats dirs and evaluate a user-supplied predicate.

    Expects exactly two entries in ``args.remainder`` (old and new stats
    dirs).  Builds an environment mapping the trailing identifier of each
    non-timer stat name to its delta, then eval()s
    ``args.evaluate_delta`` in that environment.

    Returns 0 when the predicate holds, 1 on failure or on any exception.
    Raises ValueError when ``args.remainder`` is not exactly two dirs.
    """
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs to evaluate-delta")

    (old, new) = args.remainder
    vargs = vars_of_args(args)
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)

    env = {}
    # Raw string: '\w' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the matched pattern is unchanged.
    ident = re.compile(r'(\w+)$')
    for r in compare_stats(args, old_stats.stats, new_stats.stats):
        # Timers are excluded from the evaluation environment.
        if r.name.startswith("time.") or '.time.' in r.name:
            continue
        m = re.search(ident, r.name)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, r.delta))
            env[i] = r.delta
    try:
        # NOTE(review): eval() of a user-supplied expression — acceptable
        # only because it comes from the local CLI user, never for
        # untrusted input.
        if eval(args.evaluate_delta, env):
            return 0
        else:
            print("evaluate-delta condition failed: '%s'" %
                  args.evaluate_delta)
            return 1
    except Exception as e:
        print(e)
        return 1
示例#2
0
def show_paired_incrementality(args):
    """Write one TSV row per paired stats dir comparing incrementality.

    Each row carries the old/new incrementality percentages, old/new
    skipped driver-job counts, and their deltas.
    """
    columns = ["old_pct", "old_skip", "new_pct", "new_skip",
               "delta_pct", "delta_skip", "name"]
    writer = csv.DictWriter(args.output, columns, dialect='excel-tab')
    writer.writeheader()
    vargs = vars_of_args(args)

    for pair_name, (old_jobs, new_jobs) in load_paired_stats_dirs(args):
        old_merged = merge_all_jobstats(
            (j for j in old_jobs if j.is_driver_job()), **vargs)
        new_merged = merge_all_jobstats(
            (j for j in new_jobs if j.is_driver_job()), **vargs)
        # Skip pairs where either side produced no driver jobs to merge.
        if old_merged is None or new_merged is None:
            continue
        old_pct = old_merged.incrementality_percentage()
        new_pct = new_merged.incrementality_percentage()
        old_skip = old_merged.driver_jobs_skipped()
        new_skip = new_merged.driver_jobs_skipped()
        writer.writerow({
            'name': pair_name,
            'old_pct': old_pct,
            'old_skip': old_skip,
            'new_pct': new_pct,
            'new_skip': new_skip,
            'delta_pct': new_pct - old_pct,
            'delta_skip': new_skip - old_skip,
        })
示例#3
0
def show_paired_incrementality(args):
    """Report per-pair incrementality percentages and skipped-job deltas.

    Emits an excel-tab TSV to args.output with one row per paired
    stats dir.
    """
    out = csv.DictWriter(args.output,
                         ["old_pct", "old_skip",
                          "new_pct", "new_skip",
                          "delta_pct", "delta_skip",
                          "name"],
                         dialect='excel-tab')
    out.writeheader()
    vargs = vars_of_args(args)

    for pair_name, (old_set, new_set) in load_paired_stats_dirs(args):
        merged = [merge_all_jobstats((s for s in job_set
                                      if s.is_driver_job()), **vargs)
                  for job_set in (old_set, new_set)]
        old_m, new_m = merged
        # Nothing to compare when either side lacks driver jobs.
        if old_m is None or new_m is None:
            continue
        o_pct = old_m.incrementality_percentage()
        n_pct = new_m.incrementality_percentage()
        o_skip = old_m.driver_jobs_skipped()
        n_skip = new_m.driver_jobs_skipped()
        out.writerow(dict(name=pair_name,
                          old_pct=o_pct, old_skip=o_skip,
                          new_pct=n_pct, new_skip=n_skip,
                          delta_pct=n_pct - o_pct,
                          delta_skip=n_skip - o_skip))
示例#4
0
def evaluate_delta(args):
    """Compare two stats dirs and evaluate a user-supplied predicate.

    Expects exactly two entries in ``args.remainder`` (old and new stats
    dirs).  Maps the trailing identifier of each non-timer stat name to
    its delta and eval()s ``args.evaluate_delta`` in that environment.

    Returns 0 when the predicate holds, 1 on failure or on any exception.
    Raises ValueError when ``args.remainder`` is not exactly two dirs.
    """
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs to evaluate-delta")

    (old, new) = args.remainder
    vargs = vars_of_args(args)
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)

    env = {}
    # Raw string: '\w' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the pattern itself is unchanged.
    ident = re.compile(r'(\w+)$')
    for r in compare_stats(args, old_stats.stats, new_stats.stats):
        # Timers are excluded from the evaluation environment.
        if r.name.startswith("time.") or '.time.' in r.name:
            continue
        m = re.search(ident, r.name)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, r.delta))
            env[i] = r.delta
    try:
        # NOTE(review): eval() of a user-supplied expression — local CLI
        # input only; never feed untrusted data through this path.
        if eval(args.evaluate_delta, env):
            return 0
        else:
            print("evaluate-delta condition failed: '%s'" %
                  args.evaluate_delta)
            return 1
    except Exception as e:
        print(e)
        return 1
示例#5
0
def compare_stats_dirs(args):
    """Merge exactly two stats dirs and write a comparison of their stats.

    Returns whatever write_comparison returns.  Raises ValueError unless
    args.remainder names exactly two dirs.
    """
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")

    vargs = vars_of_args(args)
    old_dir, new_dir = args.remainder
    merged = [merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
              for d in (old_dir, new_dir)]

    return write_comparison(args, merged[0].stats, merged[1].stats)
示例#6
0
def compare_stats_dirs(args):
    """Compare the stats of exactly two stats dirs via write_comparison."""
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")

    old_dir, new_dir = args.remainder
    vargs = vars_of_args(args)

    def _load_merged(path):
        # One call per dir: load its job stats, then merge them.
        return merge_all_jobstats(load_stats_dir(path, **vargs), **vargs)

    return write_comparison(args,
                            _load_merged(old_dir).stats,
                            _load_merged(new_dir).stats)
示例#7
0
def compare_stats_dirs(args):
    """Merge and compare exactly two stats dirs (explicit-kwargs variant)."""
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")

    old_dir, new_dir = args.remainder
    sel = args.select_module
    grp = args.group_by_module

    def _merged(path):
        # Load one stats dir, then merge with module selection/grouping.
        return merge_all_jobstats(load_stats_dir(path, select_module=sel),
                                  select_module=sel,
                                  group_by_module=grp)

    return write_comparison(args, _merged(old_dir).stats,
                            _merged(new_dir).stats)
def write_lnt_values(args):
    # Dump each stats dir as an LNT test object: written to args.output as
    # JSON, or submitted to the server named by --lnt-submit.
    # NOTE: Python 2 only (print statements, urllib/urllib2).
    for d in args.remainder:
        stats = load_stats_dir(d,
                               select_module=args.select_module,
                               select_stat=args.select_stat,
                               exclude_timers=args.exclude_timers)
        merged = merge_all_jobstats(stats,
                                    select_module=args.select_module,
                                    group_by_module=args.group_by_module)
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print "\nsubmitting to LNT server: " + url
            # NOTE(review): payload shape ('input_data' JSON + 'commit')
            # presumably matches the LNT submit API — confirm server-side.
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = urllib2.urlopen(urllib2.Request(url, data))
            response = json.loads(response_str.read())
            print "### response:"
            print response
            # Exit non-zero when the server reports an error.
            if 'success' in response:
                print "server response:\tSuccess"
            else:
                print "server response:\tError"
                print "error:\t", response['error']
                sys.exit(1)
示例#9
0
def evaluate(args):
    """Evaluate a user-supplied predicate against one merged stats dir.

    Expects exactly one entry in ``args.remainder``.  Maps the trailing
    identifier of each non-timer stat name to its value and eval()s
    ``args.evaluate`` in that environment.

    Returns 0 when the predicate holds, 1 on failure or on any exception.
    Raises ValueError when ``args.remainder`` is not exactly one dir.
    """
    if len(args.remainder) != 1:
        raise ValueError("Expected exactly 1 stats-dir to evaluate against")

    d = args.remainder[0]
    vargs = vars_of_args(args)
    merged = merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
    env = {}
    # Raw string: avoids the invalid '\w' escape warning on modern
    # Python; the matched pattern is unchanged.
    ident = re.compile(r'(\w+)$')
    for (k, v) in merged.stats.items():
        # Timers are excluded from the evaluation environment.
        if k.startswith("time.") or '.time.' in k:
            continue
        m = re.search(ident, k)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, v))
            env[i] = v
    try:
        # NOTE(review): eval() of a user-supplied expression — local CLI
        # input only; never feed untrusted data through this path.
        if eval(args.evaluate, env):
            return 0
        else:
            print("evaluate condition failed: '%s'" % args.evaluate)
            return 1
    except Exception as e:
        print(e)
        return 1
示例#10
0
def evaluate(args):
    """Evaluate a user-supplied predicate against one merged stats dir.

    Expects exactly one entry in ``args.remainder``.  Builds an
    environment mapping the trailing identifier of each non-timer stat
    name to its value, then eval()s ``args.evaluate`` in it.

    Returns 0 when the predicate holds, 1 on failure or on any exception.
    Raises ValueError when ``args.remainder`` is not exactly one dir.
    """
    if len(args.remainder) != 1:
        raise ValueError("Expected exactly 1 stats-dir to evaluate against")

    d = args.remainder[0]
    vargs = vars_of_args(args)
    merged = merge_all_jobstats(load_stats_dir(d, **vargs), **vargs)
    env = {}
    # Raw string: '\w' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern Python); the pattern itself is unchanged.
    ident = re.compile(r'(\w+)$')
    for (k, v) in merged.stats.items():
        # Timers are excluded from the evaluation environment.
        if k.startswith("time.") or '.time.' in k:
            continue
        m = re.search(ident, k)
        if m:
            i = m.groups()[0]
            if args.verbose:
                print("%s => %s" % (i, v))
            env[i] = v
    try:
        # NOTE(review): eval() of a user-supplied expression — local CLI
        # input only; never feed untrusted data through this path.
        if eval(args.evaluate, env):
            return 0
        else:
            print("evaluate condition failed: '%s'" % args.evaluate)
            return 1
    except Exception as e:
        print(e)
        return 1
示例#11
0
def set_csv_baseline(args):
    # Create or update a TSV baseline file of (epoch, name, value) rows.
    # When the file already exists, only names already present in it are
    # refreshed; otherwise a fresh baseline is written.  Returns 0.
    # NOTE: Python 2 only ('wb' csv file, print statements).
    existing = None
    if os.path.exists(args.set_csv_baseline):
        with open(args.set_csv_baseline, "r") as f:
            existing = read_stats_dict_from_csv(f)
            print("updating %d baseline entries in %s" %
                  (len(existing), args.set_csv_baseline))
    else:
        print "making new baseline " + args.set_csv_baseline
    fieldnames = ["epoch", "name", "value"]
    with open(args.set_csv_baseline, "wb") as f:
        out = csv.DictWriter(f,
                             fieldnames,
                             dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        sel = args.select_module
        # Merge every job from every named stats dir into one JobStats.
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, select_module=sel)),
                               select_module=sel,
                               group_by_module=args.group_by_module)
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                # Names absent from the existing baseline are skipped;
                # update_epoch_value decides whether an entry changed.
                if name not in existing:
                    continue
                (epoch, value,
                 chg) = update_epoch_value(existing, name, epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch), name=name, value=int(value)))
        if existing is not None:
            print "changed %d entries in baseline" % changed
    return 0
示例#12
0
def compare_stats_dirs(args):
    """Merge exactly two stats dirs and write a comparison of their stats.

    When --select-stats-from-csv-baseline is given, restricts the
    comparison to stat names present in that baseline (stripping a
    leading 'module.' prefix when grouping by module).
    """
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")

    # NOTE(review): vars(args) aliases the argparse namespace __dict__,
    # so the 'select_stat' assignment below also mutates args — kept
    # as-is since downstream code may rely on it.
    vargs = vars(args)
    if args.select_stats_from_csv_baseline is not None:
        b = read_stats_dict_from_csv(args.select_stats_from_csv_baseline)
        if args.group_by_module:
            # Raw string: '\w' in a plain literal is an invalid escape
            # (SyntaxWarning on modern Python); pattern unchanged.
            pat = re.compile(r'^\w+\.')
            vargs['select_stat'] = set(re.sub(pat, '', k) for k in b.keys())
        else:
            vargs['select_stat'] = b.keys()

    (old, new) = args.remainder
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)

    return write_comparison(args, old_stats.stats, new_stats.stats)
示例#13
0
def compare_to_csv_baseline(args):
    """Compare merged stats from args.remainder against a CSV baseline.

    Baseline entries are (epoch, value) pairs; only values are compared.
    """
    baseline = read_stats_dict_from_csv(args.compare_to_csv_baseline)
    vargs = vars(args)
    merged = merge_all_jobstats(
        (s for d in args.remainder for s in load_stats_dir(d, **vargs)),
        **vargs)
    # Strip the epoch from each baseline entry, keeping just the value.
    old_stats = {k: v for (k, (_, v)) in baseline.items()}

    return write_comparison(args, old_stats, merged.stats)
示例#14
0
def compare_stats_dirs(args):
    """Merge exactly two stats dirs and write a comparison of their stats.

    When --select-stats-from-csv-baseline is given, restricts the
    comparison to stat names present in that baseline (stripping a
    leading 'module.' prefix when grouping by module).
    """
    if len(args.remainder) != 2:
        raise ValueError("Expected exactly 2 stats-dirs")

    # NOTE(review): vars(args) aliases the argparse namespace __dict__,
    # so the 'select_stat' assignment below also mutates args — kept
    # as-is since downstream code may rely on it.
    vargs = vars(args)
    if args.select_stats_from_csv_baseline is not None:
        b = read_stats_dict_from_csv(args.select_stats_from_csv_baseline)
        if args.group_by_module:
            # Raw string: '\w' in a plain literal is an invalid escape
            # (SyntaxWarning on modern Python); pattern unchanged.
            pat = re.compile(r'^\w+\.')
            vargs['select_stat'] = set(re.sub(pat, '', k) for k in b.keys())
        else:
            vargs['select_stat'] = b.keys()

    (old, new) = args.remainder
    old_stats = merge_all_jobstats(load_stats_dir(old, **vargs), **vargs)
    new_stats = merge_all_jobstats(load_stats_dir(new, **vargs), **vargs)

    return write_comparison(args, old_stats.stats, new_stats.stats)
示例#15
0
def compare_to_csv_baseline(args):
    """Compare merged stats against a CSV baseline, filtered by select_stat."""
    baseline = read_stats_dict_from_csv(args.compare_to_csv_baseline,
                                        select_stat=args.select_stat)
    vargs = vars(args)
    jobs = (s for d in args.remainder for s in load_stats_dir(d, **vargs))
    merged = merge_all_jobstats(jobs, **vargs)
    # Each baseline entry is an (epoch, value) pair; compare values only.
    old_stats = {name: value for name, (_, value) in baseline.items()}

    return write_comparison(args, old_stats, merged.stats)
示例#16
0
def compare_to_csv_baseline(args):
    """Compare module-filtered merged stats against a CSV baseline."""
    baseline = read_stats_dict_from_csv(args.compare_to_csv_baseline)
    module = args.select_module
    jobs = (s for d in args.remainder
            for s in load_stats_dir(d, select_module=module))
    merged = merge_all_jobstats(jobs,
                                select_module=module,
                                group_by_module=args.group_by_module)
    # Each baseline entry is an (epoch, value) pair; compare values only.
    old_stats = {k: v for k, (_, v) in baseline.items()}

    return write_comparison(args, old_stats, merged.stats)
示例#17
0
def compare_to_csv_baseline(args):
    """Compare merged stats against a UTF-8 CSV baseline file."""
    vargs = vars_of_args(args)
    with io.open(args.compare_to_csv_baseline, 'r', encoding='utf-8') as f:
        baseline = read_stats_dict_from_csv(
            f, select_stat=vargs['select_stat'])
    jobs = (s for d in args.remainder for s in load_stats_dir(d, **vargs))
    merged = merge_all_jobstats(jobs, **vargs)
    # Each baseline entry is an (epoch, value) pair; compare values only.
    old_stats = {k: v for k, (_, v) in baseline.items()}

    return write_comparison(args, old_stats, merged.stats)
示例#18
0
def set_csv_baseline(args):
    """Create or update a TSV baseline of (epoch, name, value) rows.

    When the baseline file exists, only names already present in it are
    refreshed.  Returns 0 on success, 1 when no stats are found.
    """
    existing = None
    vargs = vars_of_args(args)
    if os.path.exists(args.set_csv_baseline):
        with io.open(args.set_csv_baseline,
                     "r",
                     encoding='utf-8',
                     newline='\n') as f:
            existing = read_stats_dict_from_csv(
                f, select_stat=vargs['select_stat'])
            print("updating %d baseline entries in %s" %
                  (len(existing), args.set_csv_baseline))
    else:
        print("making new baseline " + args.set_csv_baseline)
    fieldnames = ["epoch", "name", "value"]

    def _open(path):
        # Python 2's csv module wants a binary file; Python 3 wants text.
        if sys.version_info[0] < 3:
            return open(path, 'wb')
        return io.open(path, "w", encoding='utf-8', newline='\n')

    with _open(args.set_csv_baseline) as f:
        writer = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                                quoting=csv.QUOTE_NONNUMERIC)
        merged = merge_all_jobstats(
            (s for d in args.remainder
             for s in load_stats_dir(d, **vargs)), **vargs)
        if merged is None:
            print("no stats found")
            return 1
        changed = 0
        epoch_now = int(time.time())
        for name in sorted(merged.stats.keys()):
            epoch = epoch_now
            value = merged.stats[name]
            if existing is not None:
                # Skip names absent from the baseline; update_epoch_value
                # decides whether the retained entry changed.
                if name not in existing:
                    continue
                epoch, value, chg = update_epoch_value(
                    existing, name, epoch, value)
                changed += chg
            writer.writerow(dict(epoch=int(epoch), name=name,
                                 value=int(value)))
        if existing is not None:
            print("changed %d entries in baseline" % changed)
    return 0
示例#19
0
def write_lnt_values(args):
    # Dump each stats dir as an LNT test object: written to args.output as
    # JSON, or submitted to the server named by --lnt-submit.
    # NOTE: Python 2 only (print statements, urllib/urllib2).
    for d in args.remainder:
        stats = load_stats_dir(d, **vars(args))
        merged = merge_all_jobstats(stats, **vars(args))
        j = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(j, args.output, indent=4)
        else:
            url = args.lnt_submit
            print "\nsubmitting to LNT server: " + url
            # NOTE(review): payload shape ('input_data' JSON + 'commit')
            # presumably matches the LNT submit API — confirm server-side.
            json_report = {'input_data': json.dumps(j), 'commit': '1'}
            data = urllib.urlencode(json_report)
            response_str = urllib2.urlopen(urllib2.Request(url, data))
            response = json.loads(response_str.read())
            print "### response:"
            print response
            # Exit non-zero when the server reports an error.
            if 'success' in response:
                print "server response:\tSuccess"
            else:
                print "server response:\tError"
                print "error:\t", response['error']
                sys.exit(1)
示例#20
0
def set_csv_baseline(args):
    # Create or update a TSV baseline of (epoch, name, value) rows using
    # vars_of_args for load/merge options.  Returns 0 on success, 1 when
    # no stats are found.  NOTE: Python 2 only ('wb' csv, print statements).
    existing = None
    vargs = vars_of_args(args)
    if os.path.exists(args.set_csv_baseline):
        with open(args.set_csv_baseline, "r") as f:
            ss = vargs['select_stat']
            existing = read_stats_dict_from_csv(f, select_stat=ss)
            print ("updating %d baseline entries in %s" %
                   (len(existing), args.set_csv_baseline))
    else:
        print "making new baseline " + args.set_csv_baseline
    fieldnames = ["epoch", "name", "value"]
    with open(args.set_csv_baseline, "wb") as f:
        out = csv.DictWriter(f, fieldnames, dialect='excel-tab',
                             quoting=csv.QUOTE_NONNUMERIC)
        # Merge every job from every named stats dir into one JobStats.
        m = merge_all_jobstats((s for d in args.remainder
                                for s in load_stats_dir(d, **vargs)),
                               **vargs)
        if m is None:
            print "no stats found"
            return 1
        changed = 0
        newepoch = int(time.time())
        for name in sorted(m.stats.keys()):
            epoch = newepoch
            value = m.stats[name]
            if existing is not None:
                # Only names already present in the baseline are refreshed;
                # update_epoch_value decides whether the entry changed.
                if name not in existing:
                    continue
                (epoch, value, chg) = update_epoch_value(existing, name,
                                                         epoch, value)
                changed += chg
            out.writerow(dict(epoch=int(epoch),
                              name=name,
                              value=int(value)))
        if existing is not None:
            print "changed %d entries in baseline" % changed
    return 0
示例#21
0
def write_lnt_values(args):
    """Emit merged stats as LNT test objects.

    For each stats dir in args.remainder: dump JSON to args.output, or,
    when --lnt-submit is set, POST the report to that LNT server and
    exit(1) on a server-reported error.
    """
    vargs = vars_of_args(args)
    for stats_dir in args.remainder:
        merged = merge_all_jobstats(load_stats_dir(stats_dir, **vargs),
                                    **vargs)
        payload = merged.to_lnt_test_obj(args)
        if args.lnt_submit is None:
            json.dump(payload, args.output, indent=4)
            continue
        url = args.lnt_submit
        print("\nsubmitting to LNT server: " + url)
        # NOTE(review): form fields ('input_data' JSON + 'commit')
        # presumably match the LNT submit API — confirm server-side.
        form = urllib.urlencode({'input_data': json.dumps(payload),
                                 'commit': '1'})
        reply = json.loads(URLOpen(Request(url, form)).read())
        print("### response:")
        print(reply)
        if 'success' in reply:
            print("server response:\tSuccess")
        else:
            print("server response:\tError")
            print("error:\t", reply['error'])
            sys.exit(1)