def main(host, argv):
    """Command-line entry point: report test timings from a times_ms.json.

    Reads the JSON trie of per-test times (from the first positional
    argument, or the port's results directory by default) and dispatches
    to print_fastest (--fastest) or print_times (everything else).
    """
    parser = optparse.OptionParser(usage='%prog [times_ms.json]')
    parser.add_option('-f', '--forward', action='store', type='int',
                      help='group times by first N directories of test')
    parser.add_option('-b', '--backward', action='store', type='int',
                      help='group times by last N directories of test')
    parser.add_option('--fastest', action='store', type='float',
                      help='print a list of tests that will take N % of the time')

    epilog = """
       You can print out aggregate times per directory using the -f and -b
       flags. The value passed to each flag indicates the "depth" of the flag,
       similar to positive and negative arguments to python arrays.

       For example, given fast/forms/week/week-input-type.html, -f 1
       truncates to 'fast', -f 2 and -b 2 truncates to 'fast/forms', and -b 1
       truncates to fast/forms/week . -f 0 truncates to '', which can be used
       to produce a single total time for the run."""
    # Strip the source-level indentation so --help shows flush-left text.
    parser.epilog = '\n'.join(map(str.lstrip, epilog.splitlines()))

    options, args = parser.parse_args(argv)

    port = host.port_factory.get()
    # An explicit path on the command line wins; otherwise fall back to the
    # port's default results directory.
    if args and args[0]:
        times_ms_path = args[0]
    else:
        times_ms_path = host.filesystem.join(port.results_directory(),
                                             'times_ms.json')

    times = convert_times_trie_to_flat_paths(
        json.loads(host.filesystem.read_text_file(times_ms_path)))

    # NOTE(review): a falsy --fastest value (e.g. 0) falls through to
    # print_times; presumably intentional, as 0% selects no tests.
    if options.fastest:
        if options.forward is None and options.backward is None:
            options.forward = 0
        print_fastest(host, port, options, times)
    else:
        print_times(host, options, times)
    def _fastest_tests(self, times_trie, all_tests, fastest_percentile):
        """Return the tests expected to cover fastest_percentile% of run time.

        Args:
            times_trie: nested dict of historical per-test times in ms.
            all_tests: iterable of every test that currently exists.
            fastest_percentile: percentage (0-100) of the timed tests to
                select, counted from the fastest end; values outside the
                range are clamped.

        Returns:
            An unordered, deduped list of test names: the fastest recorded
            tests that still exist, plus every test with no recorded time.
        """
        times = convert_times_trie_to_flat_paths(times_trie)

        # Ignore tests with a time==0 because those are skipped tests.
        sorted_times = sorted([test for (test, time) in times.iteritems() if time],
                              key=lambda t: (times[t], t))
        clamped_percentile = max(0, min(100, fastest_percentile))
        number_of_tests_to_return = int(len(sorted_times) * clamped_percentile / 100)
        fastest_tests = set(sorted_times[:number_of_tests_to_return])

        # BUG FIX: the historical timing data can mention tests that have
        # since been deleted or renamed; don't try to run those.
        fastest_tests = fastest_tests.intersection(all_tests)

        # For fastest tests, include any tests not in the times_ms.json so that
        # new tests get run in the fast set.
        unaccounted_tests = set(all_tests) - set(times.keys())

        # Using a set to dedupe here means that --order=None won't work, but that's
        # ok because --fastest already runs in an arbitrary order.
        return list(fastest_tests.union(unaccounted_tests))
    def _fastest_tests(self, times_trie, all_tests, fastest_percentile):
        """Pick the fastest fastest_percentile% of tests, plus untimed ones.

        Tests with a recorded time of zero are treated as skipped and
        excluded; tests that exist in all_tests but have no timing data are
        always included so brand-new tests still get exercised.
        """
        flat_times = convert_times_trie_to_flat_paths(times_trie)

        # A time of 0 marks a skipped test; leave those out entirely.
        timed_tests = [name for (name, ms) in flat_times.iteritems() if ms]
        timed_tests.sort(key=lambda name: (flat_times[name], name))

        percentile = min(100, max(0, fastest_percentile))
        cutoff = int(len(timed_tests) * percentile / 100)
        selected = set(timed_tests[:cutoff])

        # Drop anything from the historical data that no longer exists.
        selected &= set(all_tests)

        # Tests absent from times_ms.json (i.e. new ones) always run.
        untimed = set(all_tests) - set(flat_times.keys())

        # Returning via sets loses ordering, so --order=None won't work —
        # that's fine because --fastest already runs in an arbitrary order.
        return list(selected | untimed)
def main(host, argv):
    """Print aggregated test timings from a times_ms.json file.

    Without --fastest, prints per-test (or grouped, with -f/-b) times.
    With --fastest, prints the set of tests covering N% of the run time.
    """
    parser = optparse.OptionParser(usage='%prog [times_ms.json]')
    parser.add_option('-f', '--forward', action='store', type='int',
                      help='group times by first N directories of test')
    parser.add_option('-b', '--backward', action='store', type='int',
                      help='group times by last N directories of test')
    parser.add_option('--fastest', action='store', type='float',
                      help='print a list of tests that will take N % of the time')

    epilog = """
       You can print out aggregate times per directory using the -f and -b
       flags. The value passed to each flag indicates the "depth" of the flag,
       similar to positive and negative arguments to python arrays.

       For example, given fast/forms/week/week-input-type.html, -f 1
       truncates to 'fast', -f 2 and -b 2 truncates to 'fast/forms', and -b 1
       truncates to fast/forms/week . -f 0 truncates to '', which can be used
       to produce a single total time for the run."""
    # De-indent so the epilog renders flush-left in --help output.
    parser.epilog = '\n'.join([line.lstrip() for line in epilog.splitlines()])

    options, args = parser.parse_args(argv)

    port = host.port_factory.get()
    if args and args[0]:
        # Explicit path given on the command line.
        times_ms_path = args[0]
    else:
        times_ms_path = host.filesystem.join(port.results_directory(), 'times_ms.json')

    times_trie = json.loads(host.filesystem.read_text_file(times_ms_path))
    times = convert_times_trie_to_flat_paths(times_trie)

    if not options.fastest:
        print_times(host, options, times)
        return
    if options.forward is None and options.backward is None:
        # No grouping requested: default to one global bucket.
        options.forward = 0
    print_fastest(host, port, options, times)