Example #1
0
def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a',
        '--representation_alg',
        default='25th',
        help='bench representation algorithm to use, see bench_util.py.')
    parser.add_argument(
        '-b',
        '--builder',
        required=True,
        help='name of the builder whose bench ranges we are computing.')
    parser.add_argument('-d',
                        '--input_dir',
                        required=True,
                        help='a directory containing bench data files.')
    parser.add_argument(
        '-o',
        '--output_file',
        required=True,
        help='file path and name for storing the output bench expectations.')
    parser.add_argument(
        '-r',
        '--git_revision',
        required=True,
        help='the git hash to indicate the revision of input data to use.')
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(args.input_dir,
                                                  args.git_revision,
                                                  args.representation_alg)

    expectations_dict = create_expectations_dict(data_points)

    out_lines = []
    keys = sorted(expectations_dict.keys())
    for (config, bench) in keys:
        (expected, lower_bound, upper_bound) = expectations_dict[(config,
                                                                  bench)]
        out_lines.append(
            '%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
            '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
                'bench': bench,
                'config': config,
                'builder': builder,
                'representation': args.representation_alg,
                'expected': expected,
                'lower_bound': lower_bound,
                'upper_bound': upper_bound
            })

    with open(args.output_file, 'w') as file_handle:
        file_handle.write('\n'.join(out_lines))
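For reference, the %-template in the loop above produces one comma-separated expectation row per (config, bench) key. A minimal standalone sketch of a single emitted line follows; every value below (bench name, config, builder) is a made-up placeholder, not real bench data.

# Sketch of the row format written by main() above; all values are hypothetical.
row = ('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
       '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
           'bench': 'desk_example.skp',        # hypothetical bench name
           'config': '8888',                   # hypothetical config
           'builder': 'Perf-Example-Builder',  # hypothetical builder name
           'representation': '25th',
           'expected': 12.5,
           'lower_bound': 11.0,
           'upper_bound': 14.0,
       })
print(row)
# -> desk_example.skp_8888_,Perf-Example-Builder-25th,12.5,11.0,14.0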
Example #2
0
def main():
    """Parses command line and checks bench expectations."""
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "a:b:d:e:r:", "default-setting=")
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)

    directory = None
    bench_expectations = {}
    rep = '25th'  # bench representation algorithm, default to 25th
    rev = None  # git commit hash or svn revision number
    bot = None

    try:
        for option, value in opts:
            if option == "-a":
                rep = value
            elif option == "-b":
                bot = value
            elif option == "-d":
                directory = value
            elif option == "-e":
                read_expectations(bench_expectations, value)
            elif option == "-r":
                rev = value
            else:
                usage()
                assert False, "unhandled option"
    except ValueError:
        usage()
        sys.exit(2)

    if directory is None or bot is None or rev is None:
        usage()
        sys.exit(2)

    platform_and_alg = bot + '-' + rep

    data_points = bench_util.parse_skp_bench_data(directory, rev, rep)

    bench_dict = create_bench_dict(data_points)

    if bench_expectations:
        check_expectations(bench_dict, bench_expectations, platform_and_alg)
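Example #2 is the only variant that parses its flags with getopt; the others use argparse. For comparison, a rough argparse equivalent of the same -a/-b/-d/-e/-r options is sketched below. The function name parse_flags and the long option names are illustrative only; usage(), read_expectations(), and the rest of the module are assumed from the example and not redefined here.

import argparse

def parse_flags(argv=None):
    # Sketch: mirrors the -a/-b/-d/-e/-r options handled by getopt above.
    parser = argparse.ArgumentParser()
    parser.add_argument('-a', '--representation_alg', default='25th',
                        help='bench representation algorithm to use.')
    parser.add_argument('-b', '--bot', required=True,
                        help='name of the bot whose bench data to check.')
    parser.add_argument('-d', '--directory', required=True,
                        help='directory containing bench data files.')
    parser.add_argument('-e', '--expectations_file', default=None,
                        help='file holding bench expectations to check against.')
    parser.add_argument('-r', '--revision', required=True,
                        help='git commit hash or svn revision number.')
    return parser.parse_args(argv)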
Example #3
0
def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a', '--representation_alg', default='25th',
        help='bench representation algorithm to use, see bench_util.py.')
    parser.add_argument(
        '-b', '--builder', required=True,
        help='name of the builder whose bench ranges we are computing.')
    parser.add_argument(
        '-d', '--input_dir', required=True,
        help='a directory containing bench data files.')
    parser.add_argument(
        '-o', '--output_file', required=True,
        help='file path and name for storing the output bench expectations.')
    parser.add_argument(
        '-r', '--git_revision', required=True,
        help='the git hash to indicate the revision of input data to use.')
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(
        args.input_dir, args.git_revision, args.representation_alg)

    expectations_dict = create_expectations_dict(data_points)

    out_lines = []
    keys = sorted(expectations_dict.keys())
    for (config, bench) in keys:
      (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
      out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
          '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
              'bench': bench,
              'config': config,
              'builder': builder,
              'representation': args.representation_alg,
              'expected': expected,
              'lower_bound': lower_bound,
              'upper_bound': upper_bound})

    with open(args.output_file, 'w') as file_handle:
      file_handle.write('\n'.join(out_lines))
Example #4
0
def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a', '--representation_alg', default='25th',
        help='bench representation algorithm to use, see bench_util.py.')
    parser.add_argument(
        '-b', '--builder', required=True,
        help='name of the builder whose bench ranges we are computing.')
    parser.add_argument(
        '-d', '--input_dir', required=True,
        help='a directory containing bench data files.')
    parser.add_argument(
        '-o', '--output_file', required=True,
        help='file path and name for storing the output bench expectations.')
    parser.add_argument(
        '-r', '--git_revision', required=True,
        help='the git hash to indicate the revision of input data to use.')
    parser.add_argument(
        '-t', '--back_track', required=False, default=10, type=int,
        help='the number of commit hashes to look backwards to include '
             'in the calculations.')
    parser.add_argument(
        '-m', '--max_commits', required=False, default=1, type=int,
        help='the number of commit hashes to include in the calculations.')
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(
        args.input_dir, args.git_revision, args.representation_alg)

    parent_commits = get_parent_commits(args.git_revision, args.back_track)
    print "Using commits: {}".format(parent_commits)
    suffixes = get_file_suffixes(args.git_revision, args.input_dir)
    print "Using suffixes: {}".format(suffixes)

    # TODO(kelvinly): Find a better approach than directly copying from
    # the GS server?
    downloaded_commits = []
    for idx, commit in enumerate(parent_commits):
      num_downloaded = download_bench_data(
          builder, commit, suffixes, args.input_dir)
      if num_downloaded > 0:
        downloaded_commits.append((num_downloaded, idx, commit))

    if len(downloaded_commits) < args.max_commits:
      print('Less than the desired number of commits found. Please increase '
            '--back_track in later runs.')
    trunc_commits = sorted(downloaded_commits, reverse=True)[:args.max_commits]
    extra_data = []
    for _, idx, commit in trunc_commits:
      extra_data.append((idx, bench_util.parse_skp_bench_data(
          args.input_dir, commit, args.representation_alg)))

    expectations_dict = create_expectations_dict(data_points, builder,
                                                 extra_data)

    out_lines = []
    keys = sorted(expectations_dict.keys())
    for (config, bench) in keys:
      (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
      out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
          '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
              'bench': bench,
              'config': config,
              'builder': builder,
              'representation': args.representation_alg,
              'expected': expected,
              'lower_bound': lower_bound,
              'upper_bound': upper_bound})

    with open(args.output_file, 'w') as file_handle:
      file_handle.write('\n'.join(out_lines))
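The selection above sorts the (num_downloaded, idx, commit) tuples in reverse, so the commits with the most downloaded bench files are kept first; ties fall back to the larger index, i.e. presumably the older parent, assuming get_parent_commits() returns commits from newest to oldest. A tiny standalone illustration with made-up hashes and counts:

# Illustration of the truncation step in main() above; all values are made up.
downloaded_commits = [
    (12, 0, 'aaa111'),  # 12 bench files downloaded for the most recent parent
    (15, 3, 'bbb222'),
    (15, 1, 'ccc333'),
]
max_commits = 2
trunc_commits = sorted(downloaded_commits, reverse=True)[:max_commits]
print(trunc_commits)
# -> [(15, 3, 'bbb222'), (15, 1, 'ccc333')]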
Example #5
0
def main():
    """Parses command line and checks bench expectations."""
    try:
        opts, _ = getopt.getopt(sys.argv[1:], "a:b:d:e:r:", "default-setting=")
    except getopt.GetoptError as err:
        print(str(err))
        usage()
        sys.exit(2)

    directory = None
    bench_expectations = {}
    rep = '25th'  # bench representation algorithm, default to 25th
    rev = None  # git commit hash or svn revision number
    bot = None

    try:
        for option, value in opts:
            if option == "-a":
                rep = value
            elif option == "-b":
                bot = value
            elif option == "-d":
                directory = value
            elif option == "-e":
                read_expectations(bench_expectations, value)
            elif option == "-r":
                rev = value
            else:
                usage()
                assert False, "unhandled option"
    except ValueError:
        usage()
        sys.exit(2)

    if directory is None or bot is None or rev is None:
        usage()
        sys.exit(2)

    platform_and_alg = bot + '-' + rep

    data_points = bench_util.parse_skp_bench_data(directory, rev, rep)

    bench_dict = create_bench_dict(data_points)

    if bench_expectations:
        check_expectations(bench_dict, bench_expectations, platform_and_alg)


if __name__ == "__main__":
    main()
Example #6
0
def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-a',
        '--representation_alg',
        default='25th',
        help='bench representation algorithm to use, see bench_util.py.')
    parser.add_argument(
        '-b',
        '--builder',
        required=True,
        help='name of the builder whose bench ranges we are computing.')
    parser.add_argument('-d',
                        '--input_dir',
                        required=True,
                        help='a directory containing bench data files.')
    parser.add_argument(
        '-o',
        '--output_file',
        required=True,
        help='file path and name for storing the output bench expectations.')
    parser.add_argument(
        '-r',
        '--git_revision',
        required=True,
        help='the git hash to indicate the revision of input data to use.')
    parser.add_argument(
        '-t',
        '--back_track',
        required=False,
        default=10,
        type=int,
        help='the number of commit hashes to look backwards to include '
        'in the calculations.')
    parser.add_argument(
        '-m',
        '--max_commits',
        required=False,
        default=1,
        type=int,
        help='the number of commit hashes to include in the calculations.')
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(args.input_dir,
                                                  args.git_revision,
                                                  args.representation_alg)

    parent_commits = get_parent_commits(args.git_revision, args.back_track)
    print "Using commits: {}".format(parent_commits)
    suffixes = get_file_suffixes(args.git_revision, args.input_dir)
    print "Using suffixes: {}".format(suffixes)

    # TODO(kelvinly): Find a better approach than directly copying from
    # the GS server?
    downloaded_commits = []
    for idx, commit in enumerate(parent_commits):
        num_downloaded = download_bench_data(builder, commit, suffixes,
                                             args.input_dir)
        if num_downloaded > 0:
            downloaded_commits.append((num_downloaded, idx, commit))

    if len(downloaded_commits) < args.max_commits:
        print(
            'Less than the desired number of commits found. Please increase '
            '--back_track in later runs.')
    trunc_commits = sorted(downloaded_commits, reverse=True)[:args.max_commits]
    extra_data = []
    for _, idx, commit in trunc_commits:
        extra_data.append(
            (idx,
             bench_util.parse_skp_bench_data(args.input_dir, commit,
                                             args.representation_alg)))

    expectations_dict = create_expectations_dict(data_points, builder,
                                                 extra_data)

    out_lines = []
    keys = sorted(expectations_dict.keys())
    for (config, bench) in keys:
        (expected, lower_bound, upper_bound) = expectations_dict[(config,
                                                                  bench)]
        out_lines.append(
            '%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
            '%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
                'bench': bench,
                'config': config,
                'builder': builder,
                'representation': args.representation_alg,
                'expected': expected,
                'lower_bound': lower_bound,
                'upper_bound': upper_bound
            })

    with open(args.output_file, 'w') as file_handle:
        file_handle.write('\n'.join(out_lines))
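Because --back_track feeds get_parent_commits() and --max_commits is used both in a numeric comparison and as a slice bound, the two flags are declared with type=int above so argparse converts the command-line strings to integers. A minimal standalone check of that behavior; the argv values below are hypothetical:

import argparse

# Minimal check that type=int yields integers rather than strings.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--back_track', default=10, type=int)
parser.add_argument('-m', '--max_commits', default=1, type=int)

args = parser.parse_args(['-t', '20', '-m', '2'])  # hypothetical command line
assert args.back_track == 20 and args.max_commits == 2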
Example #7
0
def main():
    """Reads bench data points, then calculate and export expectations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a", "--representation_alg", default="25th", help="bench representation algorithm to use, see bench_util.py."
    )
    parser.add_argument(
        "-b", "--builder", required=True, help="name of the builder whose bench ranges we are computing."
    )
    parser.add_argument("-d", "--input_dir", required=True, help="a directory containing bench data files.")
    parser.add_argument(
        "-o", "--output_file", required=True, help="file path and name for storing the output bench expectations."
    )
    parser.add_argument(
        "-r", "--git_revision", required=True, help="the git hash to indicate the revision of input data to use."
    )
    parser.add_argument(
        "-t",
        "--back_track",
        required=False,
        default=10,
        type=int,
        help="the number of commit hashes to look backwards to include in the calculations.",
    )
    parser.add_argument(
        "-m",
        "--max_commits",
        required=False,
        default=1,
        type=int,
        help="the number of commit hashes to include in the calculations.",
    )
    args = parser.parse_args()

    builder = args.builder

    data_points = bench_util.parse_skp_bench_data(args.input_dir, args.git_revision, args.representation_alg)

    parent_commits = get_parent_commits(args.git_revision, args.back_track)
    print "Using commits: {}".format(parent_commits)
    suffixes = get_file_suffixes(args.git_revision, args.input_dir)
    print "Using suffixes: {}".format(suffixes)

    # TODO(kelvinly): Find a better approach than directly copying from
    # the GS server?
    downloaded_commits = []
    for idx, commit in enumerate(parent_commits):
        num_downloaded = download_bench_data(builder, commit, suffixes, args.input_dir)
        if num_downloaded > 0:
            downloaded_commits.append((num_downloaded, idx, commit))

    if len(downloaded_commits) < args.max_commits:
        print ("Less than desired number of commits found. Please increase" "--back_track in later runs")
    trunc_commits = sorted(downloaded_commits, reverse=True)[: args.max_commits]
    extra_data = []
    for _, idx, commit in trunc_commits:
        extra_data.append((idx, bench_util.parse_skp_bench_data(args.input_dir, commit, args.representation_alg)))

    expectations_dict = create_expectations_dict(data_points, builder, extra_data)

    out_lines = []
    keys = sorted(expectations_dict.keys())
    for (config, bench) in keys:
        (expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
        out_lines.append(
            "%(bench)s_%(config)s_,%(builder)s-%(representation)s,"
            "%(expected)s,%(lower_bound)s,%(upper_bound)s"
            % {
                "bench": bench,
                "config": config,
                "builder": builder,
                "representation": args.representation_alg,
                "expected": expected,
                "lower_bound": lower_bound,
                "upper_bound": upper_bound,
            }
        )

    with open(args.output_file, "w") as file_handle:
        file_handle.write("\n".join(out_lines))
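The expectations file written here is presumably what Example #2 later loads through read_expectations() before calling check_expectations(). A rough sketch of parsing a single row back out of the comma-separated format emitted above; parse_expectation_line and the sample values are illustrative only.

def parse_expectation_line(line):
    # Splits one emitted row into its parts:
    # <bench>_<config>_,<builder>-<representation>,<expected>,<lower>,<upper>
    key, platform_and_alg, expected, lower, upper = line.strip().split(',')
    return key, platform_and_alg, float(expected), float(lower), float(upper)

# Hypothetical row in the format written by main() above.
sample = 'desk_example.skp_8888_,Perf-Example-Builder-25th,12.5,11.0,14.0'
print(parse_expectation_line(sample))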