Esempio n. 1
0
def main(argv):
    colorama.init()
    if get_git_version() < MIN_UPSTREAM_TRACK_GIT_VERSION:
        print >> sys.stderr, (
            'This tool will not show all tracking information for git version '
            'earlier than ' +
            '.'.join(str(x) for x in MIN_UPSTREAM_TRACK_GIT_VERSION) +
            '. Please consider upgrading.')

    parser = argparse.ArgumentParser(
        description='Print a a tree of all branches parented by their upstreams'
    )
    parser.add_argument('-v',
                        action='count',
                        help='Display branch hash and Rietveld URL')
    parser.add_argument('--no-color',
                        action='store_true',
                        dest='nocolor',
                        help='Turn off colors.')

    opts = parser.parse_args(argv[1:])

    mapper = BranchMapper()
    mapper.verbosity = opts.v
    mapper.output.nocolor = opts.nocolor
    mapper.start()
    print mapper.output.as_formatted_string()
Esempio n. 2
0
def main(argv):
    """Print the branch tree for the current git checkout (Python 2).

    Expects no command-line arguments beyond the program name.
    """
    colorama.init()
    assert len(argv) == 1, "No arguments expected"
    # branch -> upstream branch (NO_UPSTREAM sentinel when none).
    branch_map = {}
    # upstream -> list of branches tracking it.
    par_map = collections.defaultdict(list)
    for branch in branches():
        par = upstream(branch) or NO_UPSTREAM
        branch_map[branch] = par
        par_map[par].append(branch)

    current = current_branch()
    # One batched hash lookup: index 0 is the current branch, the rest line up
    # with branch_map's keys (py2: keys() and iterkeys() share iteration order).
    hashes = hash_multi(current, *branch_map.keys())
    current_hash = hashes[0]
    par_hashes = {
        k: hashes[i + 1]
        for i, k in enumerate(branch_map.iterkeys())
    }
    par_hashes[NO_UPSTREAM] = 0
    tag_set = tags()
    # Print each tree rooted at a branch with no local parent.
    # NOTE(review): assumes print_branch prunes par_map as it recurses,
    # otherwise this loop would never terminate — confirm in print_branch.
    while par_map:
        for parent in par_map:
            if parent not in branch_map:
                if parent not in par_hashes:
                    par_hashes[parent] = hash_one(parent)
                print_branch(current, current_hash, parent, par_hashes,
                             par_map, branch_map, tag_set)
                break
def main(argv):
  colorama.init()
  if get_git_version() < MIN_UPSTREAM_TRACK_GIT_VERSION:
    print >> sys.stderr, (
        'This tool will not show all tracking information for git version '
        'earlier than ' +
        '.'.join(str(x) for x in MIN_UPSTREAM_TRACK_GIT_VERSION) +
        '. Please consider upgrading.')

  parser = argparse.ArgumentParser(
      description='Print a a tree of all branches parented by their upstreams')
  parser.add_argument('-v', action='count',
                      help='Display branch hash and Rietveld URL')
  parser.add_argument('--no-color', action='store_true', dest='nocolor',
                      help='Turn off colors.')
  parser.add_argument(
      '-j', '--maxjobs', action='store', type=int,
      help='The number of jobs to use when retrieving review status')
  parser.add_argument('--show-subject', action='store_true',
                      dest='show_subject', help='Show the commit subject.')

  opts = parser.parse_args(argv)

  mapper = BranchMapper()
  mapper.verbosity = opts.v
  mapper.output.nocolor = opts.nocolor
  mapper.maxjobs = opts.maxjobs
  mapper.show_subject = opts.show_subject
  mapper.start()
  print mapper.output.as_formatted_string()
  return 0
Esempio n. 4
0
def print_version_change(config_version):
    """Print a notice to let the user know we are collecting more metrics."""
    colorama.init()
    err = sys.stderr
    # Switch stderr to bright red; no trailing newline so the header follows
    # immediately after the escape sequence.
    print(colorama.Fore.RED + '\033[1m', file=err, end='')
    print(NOTICE_VERSION_CHANGE_HEADER, file=err)
    print(EMPTY_LINE, file=err)
    # Emit the change notice for every version the user has not yet seen.
    for notice_version in range(config_version + 1, CURRENT_VERSION + 1):
        print(CHANGE_NOTICE[notice_version], file=err)
        print(EMPTY_LINE, file=err)
Esempio n. 5
0
def print_notice(countdown):
  """Print a notice to let the user know the status of metrics collection."""
  colorama.init()
  # NOTE(review): unlike print_version_change this prints a newline after the
  # color escape (no end='') — confirm that is intentional.
  print(colorama.Fore.RED + '\033[1m', file=sys.stderr)
  header = (NOTICE_COUNTDOWN_HEADER % countdown) if countdown \
      else NOTICE_COLLECTION_HEADER
  print(header, file=sys.stderr)
  print(NOTICE_FOOTER + colorama.Style.RESET_ALL, file=sys.stderr)
Esempio n. 6
0
def print_red_boxed_text(out, min_width, lines):
    """Draw *lines* inside a red box, writing each piece via the out callback.

    The box is at least min_width characters wide inside the border.
    """
    colorama.init()
    # cmd.exe cannot render the unicode heavy box-drawing glyphs, so fall
    # back to ASCII on Windows.
    if sys.platform == 'win32':
        EW, NS, SE, SW, NE, NW = '=|++++'
    else:
        EW, NS, SE, SW, NE, NW = u'\u2501\u2503\u250F\u2513\u2517\u251B'
    out(colorama.Fore.RED + colorama.Style.BRIGHT)
    width = max(min_width, max(len(line) for line in lines))
    top = SE + EW * (width + 2) + SW
    bottom = NE + EW * (width + 2) + NW
    out(top + '\n')
    for text in lines:
        # Left-justify each line to the box's inner width.
        out('%s %-*s %s\n' % (NS, width, text, NS))
    out(bottom + '\n')
    out(colorama.Style.RESET_ALL)
Esempio n. 7
0
def Main(argv):
    """Doesn't parse the arguments here, just find the right subcommand to
  execute."""
    if sys.hexversion < 0x02060000:
        print >> sys.stderr, (
            '\nYour python version %s is unsupported, please upgrade.\n' %
            sys.version.split(' ', 1)[0])
        return 2
    if not sys.executable:
        # Fixed grammar: was "it's own executable".
        print >> sys.stderr, (
            '\nPython cannot find the location of its own executable.\n')
        return 2
    fix_encoding.fix_encoding()
    disable_buffering()
    colorama.init()
    dispatcher = subcommand.CommandDispatcher(__name__)
    try:
        return dispatcher.execute(OptionParser(), argv)
    except KeyboardInterrupt:
        # Reap child processes before propagating the interrupt.
        gclient_utils.GClientChildren.KillAllRemainingChildren()
        raise
    except (gclient_utils.Error, subprocess2.CalledProcessError), e:
        print >> sys.stderr, 'Error: %s' % str(e)
        return 1
Esempio n. 8
0
def main(argv):
  """Print the branch tree for the current git checkout (Python 2).

  Expects no command-line arguments beyond the program name.
  """
  colorama.init()
  assert len(argv) == 1, "No arguments expected"
  # branch -> upstream branch (NO_UPSTREAM sentinel when none).
  branch_map = {}
  # upstream -> list of branches tracking it.
  par_map = collections.defaultdict(list)
  for branch in branches():
    par = upstream(branch) or NO_UPSTREAM
    branch_map[branch] = par
    par_map[par].append(branch)

  current = current_branch()
  # One batched hash lookup: index 0 is the current branch, the rest line up
  # with branch_map's keys (py2: keys() and iterkeys() share iteration order).
  hashes = hash_multi(current, *branch_map.keys())
  current_hash = hashes[0]
  par_hashes = {k: hashes[i+1] for i, k in enumerate(branch_map.iterkeys())}
  par_hashes[NO_UPSTREAM] = 0
  tag_set = tags()
  # Print each tree rooted at a branch with no local parent.
  # NOTE(review): assumes print_branch prunes par_map as it recurses,
  # otherwise this loop would never terminate — confirm in print_branch.
  while par_map:
    for parent in par_map:
      if parent not in branch_map:
        if parent not in par_hashes:
          par_hashes[parent] = hash_one(parent)
        print_branch(current, current_hash, parent, par_hashes, par_map,
                     branch_map, tag_set)
        break
Esempio n. 9
0
def Main(argv):
  """Doesn't parse the arguments here, just find the right subcommand to
  execute."""
  if sys.hexversion < 0x02060000:
    print >> sys.stderr, (
        '\nYour python version %s is unsupported, please upgrade.\n' %
        sys.version.split(' ', 1)[0])
    return 2
  if not sys.executable:
    # Fixed grammar: was "it's own executable".
    print >> sys.stderr, (
        '\nPython cannot find the location of its own executable.\n')
    return 2
  fix_encoding.fix_encoding()
  disable_buffering()
  colorama.init()
  dispatcher = subcommand.CommandDispatcher(__name__)
  try:
    return dispatcher.execute(OptionParser(), argv)
  except KeyboardInterrupt:
    # Reap child processes before propagating the interrupt.
    gclient_utils.GClientChildren.KillAllRemainingChildren()
    raise
  except (gclient_utils.Error, subprocess2.CalledProcessError), e:
    print >> sys.stderr, 'Error: %s' % str(e)
    return 1
  # NOTE(review): everything below is unreachable — every path through the
  # try/except above returns or raises. It references names (options, oses,
  # tests) never defined in this function and looks like residue pasted in
  # from a different script; kept verbatim pending confirmation.
  if options.os:
    if options.os not in oses:
      parser.error(
          '--os %s is unknown. Valid values are %s' % (
            options.os, ', '.join(sorted(oses))))
    oses = [options.os]

  if sys.platform in ('win32', 'cygwin'):
    # If we are on Windows, don't generate the tests for Linux and Mac since
    # they use symlinks and we can't create symlinks on windows.
    oses = ['Windows']
    if options.os != 'win32':
      print('Linux and Mac tests skipped since running on Windows.')

  return run_swarming_tests_on_swarming(
      options.swarming,
      options.isolate_server,
      options.priority,
      oses,
      tests,
      options.logs,
      options.no_idempotent)


if __name__ == '__main__':
  # Script entry point: normalize stream encoding and disable buffering so
  # progress output appears immediately, then run main() and propagate its
  # exit code to the shell.
  fix_encoding.fix_encoding()
  tools.disable_buffering()
  colorama.init()
  sys.exit(main())
Esempio n. 11
0
def main():
  """Load-test a Swarming server by triggering tasks at a steady rate.

  Parses command-line options, fires trigger_task jobs from a thread pool at
  --send-rate for --duration seconds, then collects and summarizes results.

  Returns:
    0 on success; exits via parser.error() on invalid flags.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '-s', '--send-rate', type='float', default=16., metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  group.add_option(
      '-D', '--duration', type='float', default=60., metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  group.add_option(
      '-t', '--timeout', type='float', default=3600., metavar='N',
      help='Timeout to get results, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  # Expected number of tasks over the whole run (rate * seconds).
  total = options.send_rate * options.duration
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' %
        (options.send_rate, options.duration, options.concurrent,
        options.timeout, total))
  print('[processing/processed/todo]')

  # This is used so there's no clash between runs and actual real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  columns = [('processing', 0), ('processed', 0), ('todo', 0)]
  progress = threading_utils.Progress(columns)
  index = 0
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      # 'start' is set on the first statement so it is always defined for the
      # elapsed-time print after the with block.
      start = time.time()
      while True:
        duration = time.time() - start
        if duration > options.duration:
          break
        # Catch up to the ideal send schedule; triggers in bursts if the loop
        # fell behind the requested rate.
        should_have_triggered_so_far = int(duration * options.send_rate)
        while index < should_have_triggered_so_far:
          pool.add_task(
              0,
              trigger_task,
              options.swarming,
              options.dimensions,
              progress,
              unique,
              options.timeout,
              index)
          progress.update_item('', todo=1)
          index += 1
          progress.print_update()
        time.sleep(0.01)
    except KeyboardInterrupt:
      aborted = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % aborted,
          raw=True,
          todo=-aborted)
      progress.print_update()
    finally:
      # TODO(maruel): We could give up on collecting results for the on-going
      # tasks but that would need to be optional.
      progress.update_item('Getting results for on-going tasks.', raw=True)
      results = sorted(pool.join())
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
Esempio n. 12
0
def main():
    """Fetch and display Swarming task counts or dimensions over a date range.

    When --swarming is given, data is fetched from the server and cached into
    the --json file; otherwise data is loaded from a previously written file.

    Returns:
        0 on success; exits via parser.error() on invalid flags.
    """
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules['__main__'].__doc__)
    # Default window: start of the current UTC year through tomorrow.
    tomorrow = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
    year = datetime.datetime(tomorrow.year, 1, 1)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default=os.environ.get('SWARMING_SERVER', ''),
                      help='Swarming server to use')
    group = optparse.OptionGroup(parser, 'Filtering')
    group.add_option(
        '--start',
        default=year.strftime('%Y-%m-%d'),
        help='Starting date in UTC; defaults to start of year: %default')
    group.add_option('--end',
                     default=tomorrow.strftime('%Y-%m-%d'),
                     help='End date in UTC; defaults to tomorrow: %default')
    group.add_option('--state',
                     default='ALL',
                     type='choice',
                     choices=STATES,
                     help='State to filter on')
    group.add_option('--tags',
                     action='append',
                     default=[],
                     help='Tags to filter on')
    parser.add_option_group(group)
    group = optparse.OptionGroup(parser, 'Presentation')
    group.add_option('--dimensions',
                     action='store_true',
                     help='Show the dimensions')
    group.add_option(
        '--daily-count',
        action='store_true',
        help='Show the daily count in raw number instead of histogram')
    parser.add_option_group(group)
    parser.add_option('--json',
                      default='counts.json',
                      help='File containing raw data; default: %default')
    parser.add_option('-v', '--verbose', action='count', default=0, help='Log')
    options, args = parser.parse_args()

    if args:
        parser.error('Unsupported argument %s' % args)
    logging.basicConfig(
        level=logging.DEBUG if options.verbose else logging.ERROR)
    start = parse_time_option(options.start)
    end = parse_time_option(options.end)
    print('From %s (%d) to %s (%d)' %
          (start, int((start - _EPOCH).total_seconds()), end,
           int((end - _EPOCH).total_seconds())))
    if options.swarming:
        if options.dimensions:
            data = fetch_tasks(options.swarming, start, end, options.state,
                               options.tags)
        else:
            data = fetch_counts(options.swarming, start, end, options.state,
                                options.tags)
        # NOTE(review): binary mode works on Python 2; under Python 3
        # json.dump/json.load require text-mode files — confirm the target
        # interpreter before changing.
        with open(options.json, 'wb') as f:
            json.dump(data, f)
    elif not os.path.isfile(options.json):
        parser.error('--swarming is required.')
    else:
        with open(options.json, 'rb') as f:
            data = json.load(f)

    print('')
    if options.dimensions:
        present_dimensions(data, options.daily_count)
    else:
        present_counts(data, options.daily_count)
    return 0
def main():
  """Load-test an Isolate server by uploading randomly generated items.

  Runs until either --items uploads were queued or --max-size bytes were
  scheduled, then prints a latency histogram (Python 2: uses xrange).

  Returns:
    0 on success; exits via parser.error() on invalid flags.
  """
  colorama.init()

  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-I', '--isolate-server',
      metavar='URL', default='',
      help='Isolate server to use')
  parser.add_option(
      '--namespace', default='temporary%d-gzip' % time.time(), metavar='XX',
      help='Namespace to use on the server, default: %default')
  parser.add_option(
      '--threads', type='int', default=16, metavar='N',
      help='Parallel worker threads to use, default:%default')

  data_group = optparse.OptionGroup(parser, 'Amount of data')
  graph.unit_option(
      data_group, '--items', default=0, help='Number of items to upload')
  graph.unit_option(
      data_group, '--max-size', default=0,
      help='Loop until this amount of data was transferred')
  graph.unit_option(
      data_group, '--mid-size', default=100*1024,
      help='Rough average size of each item, default:%default')
  parser.add_option_group(data_group)

  ui_group = optparse.OptionGroup(parser, 'Result histogram')
  ui_group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='Width of histogram, default:%default')
  ui_group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of histogram\'s buckets, default:%default')
  parser.add_option_group(ui_group)

  log_group = optparse.OptionGroup(parser, 'Logging')
  log_group.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  log_group.add_option(
      '-v', '--verbose', action='store_true', help='Enable logging')
  parser.add_option_group(log_group)

  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  # --items and --max-size are mutually exclusive and exactly one is required.
  if bool(options.max_size) == bool(options.items):
    parser.error(
        'Use one of --max-size or --items.\n'
        '  Use --max-size if you want to run it until NN bytes where '
        'transfered.\n'
        '  Otherwise use --items to run it for NN items.')
  options.isolate_server = options.isolate_server.rstrip('/')
  if not options.isolate_server:
    parser.error('--isolate-server is required.')

  print(
      ' - Using %d thread,  items=%d,  max-size=%d,  mid-size=%d' % (
      options.threads, options.items, options.max_size, options.mid_size))

  start = time.time()

  # Pre-generated random data pool, so upload payloads are cheap to build.
  random_pool = Randomness()
  print(' - Generated pool after %.1fs' % (time.time() - start))

  columns = [('index', 0), ('data', 0), ('size', options.items)]
  progress = Progress(columns)
  storage = isolateserver.get_storage(options.isolate_server, options.namespace)
  do_item = functools.partial(
      send_and_receive,
      random_pool,
      storage,
      progress)

  # TODO(maruel): Handle Ctrl-C should:
  # - Stop adding tasks.
  # - Stop scheduling tasks in ThreadPool.
  # - Wait for the remaining ungoing tasks to complete.
  # - Still print details and write the json file.
  with threading_utils.ThreadPoolWithProgress(
      progress, options.threads, options.threads, 0) as pool:
    if options.items:
      for _ in xrange(options.items):
        pool.add_task(0, do_item, gen_size(options.mid_size))
        progress.print_update()
    elif options.max_size:
      # This one is approximate.
      total = 0
      while True:
        size = gen_size(options.mid_size)
        progress.update_item('', size=1)
        progress.print_update()
        pool.add_task(0, do_item, size)
        total += size
        if total >= options.max_size:
          break
    results = sorted(pool.join())

  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
Esempio n. 14
0
def init():
    """Detect what stdout is connected to and initialise colorama.

    Sets the module globals IS_TTY and OUT_TYPE as a side effect, working
    around the various ways a Windows console/pipe/msys-pty can appear.
    """
    # should_wrap instructs colorama to wrap stdout/stderr with an ANSI colorcode
    # interpreter that converts them to SetConsoleTextAttribute calls. This only
    # should be True in cases where we're connected to cmd.exe's console. Setting
    # this to True on non-windows systems has no effect.
    should_wrap = False
    global IS_TTY, OUT_TYPE
    IS_TTY = sys.stdout.isatty()
    is_windows = sys.platform.startswith('win')
    if IS_TTY:
        # Yay! We detected a console in the normal way. It doesn't really matter
        # if it's windows or not, we win.
        OUT_TYPE = 'console'
        should_wrap = True
    elif is_windows:
        # assume this is some sort of file
        OUT_TYPE = 'file (win)'

        import msvcrt
        h = msvcrt.get_osfhandle(sys.stdout.fileno())
        # h is the win32 HANDLE for stdout.
        ftype = ctypes.windll.kernel32.GetFileType(h)
        if ftype == 2:  # FILE_TYPE_CHAR
            # This is a normal cmd console, but we'll only get here if we're running
            # inside a `git command` which is actually git->bash->command. Not sure
            # why isatty doesn't detect this case.
            OUT_TYPE = 'console (cmd via msys)'
            IS_TTY = True
            should_wrap = True
        elif ftype == 3:  # FILE_TYPE_PIPE
            OUT_TYPE = 'pipe (win)'

            # This is some kind of pipe on windows. This could either be a real pipe
            # or this could be msys using a pipe to emulate a pty. We use the same
            # algorithm that msys-git uses to determine if it's connected to a pty or
            # not.

            # This function and the structures are defined in the MSDN documentation
            # using the same names.
            def NT_SUCCESS(status):
                # The first two bits of status are the severity. The success
                # severities are 0 and 1, and the !success severities are 2 and 3.
                # Therefore since ctypes interprets the default restype of the call
                # to be an 'C int' (which is guaranteed to be signed 32 bits), All
                # success codes are positive, and all !success codes are negative.
                return status >= 0

            class UNICODE_STRING(ctypes.Structure):
                _fields_ = [('Length', ctypes.c_ushort),
                            ('MaximumLength', ctypes.c_ushort),
                            ('Buffer', ctypes.c_wchar_p)]

            class OBJECT_NAME_INFORMATION(ctypes.Structure):
                _fields_ = [('Name', UNICODE_STRING),
                            ('NameBuffer', ctypes.c_wchar_p)]

            buf = ctypes.create_string_buffer(1024)
            # Ask NT what the name of the object our stdout HANDLE is. It would be
            # possible to use GetFileInformationByHandleEx, but it's only available
            # on Vista+. If you're reading this in 2017 or later, feel free to
            # refactor this out.
            #
            # The '1' here is ObjectNameInformation
            if NT_SUCCESS(
                    ctypes.windll.ntdll.NtQueryObject(h, 1, buf,
                                                      len(buf) - 2, None)):
                out = OBJECT_NAME_INFORMATION.from_buffer(buf)
                # msys pty pipes are named like 'msys-....-ptyN-...'.
                name = out.Name.Buffer.split('\\')[-1]
                IS_TTY = name.startswith('msys-') and '-pty' in name
                if IS_TTY:
                    OUT_TYPE = 'bash (msys)'
        else:
            # A normal file, or an unknown file type.
            pass
    else:
        # This is non-windows, so we trust isatty.
        OUT_TYPE = 'pipe or file'

    if IS_TTY and is_windows:
        # Wrapping may cause errors on some Windows versions (crbug.com/1114548).
        if platform.release() != '10' or enable_native_ansi():
            should_wrap = False

    colorama.init(wrap=should_wrap)
Esempio n. 15
0
def main():
    """Load-test a Swarming server by triggering tasks at a steady rate.

    Fires trigger_task jobs at --send-rate for --duration seconds, streams
    results back as tasks complete (optionally dumping partial results to
    --dump), then prints a latency histogram.

    Returns:
        0 on success; exits via parser.error() on invalid flags.
    """
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option(
        '-t',
        '--timeout',
        type='float',
        default=15 * 60.,
        metavar='N',
        help='Task expiration and timeout to get results, the task itself will '
        'have %ds less than the value provided. Default: %%default' %
        TIMEOUT_OVERHEAD)
    group.add_option('-o',
                     '--output-size',
                     type='int',
                     default=100,
                     metavar='N',
                     help='Bytes sent to stdout, default: %default')
    group.add_option(
        '--sleep',
        type='int',
        default=60,
        metavar='N',
        help='Amount of time the bot should sleep, e.g. faking work, '
        'default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    # Expected number of tasks over the whole run (rate * seconds).
    total = int(round(options.send_rate * options.duration))
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            # 'start' is set first so it is always defined for the elapsed-time
            # print after the with block.
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                # Catch up to the ideal send schedule; triggers in bursts if
                # the loop fell behind the requested rate.
                should_have_triggered_so_far = int(
                    round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, options.sleep,
                                  options.output_size, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive but it's useful in the case where some tasks
                # hangs, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + '.old')
                    # BUG FIX: was opened 'wb'; json.dump writes str, which
                    # requires a text-mode file on Python 3 (and matches the
                    # sibling load-test script's use of 'w').
                    with open(options.dump, 'w') as f:
                        json.dump(results, f, separators=(',', ':'))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    return 0
def main():
  """Load-test a Swarming server by running a fleet of fake bot slaves.

  Starts --slaves FakeSwarmBot threads that each consume tasks for --consume
  seconds, runs until Ctrl-C, then prints a histogram of collected events
  (Python 2: uses the Queue module).

  Returns:
    0 on success; exits via parser.error() on invalid flags.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  parser.add_option(
      '--suffix', metavar='NAME', default='', help='Bot suffix name to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interferring with real slaves.
  parser.set_defaults(
      dimensions=[
        ('cpu', ['arm36']),
        ('hostname', socket.getfqdn()),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
        options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  # Bots push their events here; drained into 'results' at the end.
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
  hostname = get_hostname()
  if options.suffix:
    hostname += '-' + options.suffix
  slaves = [
    FakeSwarmBot(
      options.swarming, options.dimensions, swarm_bot_version_hash, hostname, i,
      progress, options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    # Busy-wait until every slave thread exits (or Ctrl-C below).
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    # Signal all bots to shut down, then fall through to the drain loop.
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
Esempio n. 17
0
def main():
  """Load tests a Swarming server by running fake swarm bot slaves.

  Starts --slaves FakeSwarmBot threads that poll --swarming and fake
  consuming each task for --consume seconds, runs until Ctrl-C (or until all
  slaves die), then prints a histogram of the collected events and optionally
  dumps them to a JSON file.

  Returns:
    0 as the process exit code.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interferring with real slaves.
  parser.set_defaults(
      dimensions=[
        ('bits', '36'),
        ('machine', os.uname()[4] + '-experimental'),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
        options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(
      options.swarming + '/get_slave_code')
  slaves = [
    FakeSwarmBot(
      options.swarming, options.dimensions, swarm_bot_version_hash, i, progress,
      options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  # Snapshot the events as a plain list: Queue.queue is a collections.deque,
  # which json.dump() cannot serialize. This also matches the sibling
  # implementation of this tool which already does list(events.queue).
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
def main():
    """Load tests a Swarming server by triggering tasks at a constant rate.

    Sends --send-rate tasks per second for --duration seconds (bounded by
    --concurrent in-flight requests), then waits for all triggered tasks to
    report results, prints a histogram and optionally dumps the results to a
    JSON file.

    Returns:
        0 as the process exit code.
    """
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    # Matches the dimensions declared by the fake bots in
    # swarming_load_test_bot so the triggered tasks land on them.
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option('-t',
                     '--timeout',
                     type='float',
                     default=3600.,
                     metavar='N',
                     help='Timeout to get results, default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = options.send_rate * options.duration
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    # Single priority level; up to options.concurrent worker threads.
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                # Catch up to the target rate: trigger however many tasks
                # should have been sent by this point in time.
                should_have_triggered_so_far = int(duration *
                                                   options.send_rate)
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
        finally:
            # TODO(maruel): We could give up on collecting results for the on-going
            # tasks but that would need to be optional.
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            results = sorted(pool.join())
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
# Esempio n. 19
# 0
def init():
  """Detects how stdout is connected and initializes colorama accordingly.

  Sets the module globals IS_TTY (whether stdout behaves like a terminal,
  including the msys/cmd special cases probed below) and OUT_TYPE (a
  human-readable description of the stdout sink), then calls colorama.init()
  with ANSI-code wrapping enabled only when attached to a console.
  """
  # should_wrap instructs colorama to wrap stdout/stderr with an ANSI colorcode
  # interpreter that converts them to SetConsoleTextAttribute calls. This only
  # should be True in cases where we're connected to cmd.exe's console. Setting
  # this to True on non-windows systems has no effect.
  should_wrap = False
  global IS_TTY, OUT_TYPE
  IS_TTY = sys.stdout.isatty()
  if IS_TTY:
    # Yay! We detected a console in the normal way. It doesn't really matter
    # if it's windows or not, we win.
    OUT_TYPE = 'console'
    should_wrap = True
  elif sys.platform.startswith('win'):
    # assume this is some sort of file
    OUT_TYPE = 'file (win)'

    import msvcrt
    import ctypes
    h = msvcrt.get_osfhandle(sys.stdout.fileno())
    # h is the win32 HANDLE for stdout.
    ftype = ctypes.windll.kernel32.GetFileType(h)
    if ftype == 2: # FILE_TYPE_CHAR
      # This is a normal cmd console, but we'll only get here if we're running
      # inside a `git command` which is actually git->bash->command. Not sure
      # why isatty doesn't detect this case.
      OUT_TYPE = 'console (cmd via msys)'
      IS_TTY = True
      should_wrap = True
    elif ftype == 3: # FILE_TYPE_PIPE
      OUT_TYPE = 'pipe (win)'
      # This is some kind of pipe on windows. This could either be a real pipe
      # or this could be msys using a pipe to emulate a pty. We use the same
      # algorithm that msys-git uses to determine if it's connected to a pty or
      # not.

      # This function and the structures are defined in the MSDN documentation
      # using the same names.
      def NT_SUCCESS(status):
        # The first two bits of status are the severity. The success
        # severities are 0 and 1, and the !success severities are 2 and 3.
        # Therefore since ctypes interprets the default restype of the call
        # to be an 'C int' (which is guaranteed to be signed 32 bits), All
        # success codes are positive, and all !success codes are negative.
        return status >= 0

      class UNICODE_STRING(ctypes.Structure):
        _fields_ = [('Length', ctypes.c_ushort),
                    ('MaximumLength', ctypes.c_ushort),
                    ('Buffer', ctypes.c_wchar_p)]

      class OBJECT_NAME_INFORMATION(ctypes.Structure):
        _fields_ = [('Name', UNICODE_STRING),
                    ('NameBuffer', ctypes.c_wchar_p)]

      buf = ctypes.create_string_buffer('\0', 1024)
      # Ask NT what the name of the object our stdout HANDLE is. It would be
      # possible to use GetFileInformationByHandleEx, but it's only available
      # on Vista+. If you're reading this in 2017 or later, feel free to
      # refactor this out.
      #
      # The '1' here is ObjectNameInformation
      if NT_SUCCESS(ctypes.windll.ntdll.NtQueryObject(h, 1, buf, len(buf)-2,
                    None)):
        out = OBJECT_NAME_INFORMATION.from_buffer(buf)
        name = out.Name.Buffer.split('\\')[-1]
        # msys/mintty emulates a pty with a named pipe whose name contains
        # 'msys-' and '-pty'; same heuristic as msys-git.
        IS_TTY = name.startswith('msys-') and '-pty' in name
        if IS_TTY:
          OUT_TYPE = 'bash (msys)'
    else:
      # A normal file, or an unknown file type.
      pass
  else:
    # This is non-windows, so we trust isatty.
    OUT_TYPE = 'pipe or file'

  colorama.init(wrap=should_wrap)
def main():
  """Fetches (or reloads from disk) Swarming task counts and presents them.

  When --swarming is given, queries the server between --start and --end and
  caches the raw data into --json; otherwise reuses a previously written
  --json file. Presents either the dimensions breakdown or the daily counts.

  Returns:
    0 as the process exit code.
  """
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules['__main__'].__doc__)
  next_day = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
  jan_first = datetime.datetime(next_day.year, 1, 1)
  parser.add_option(
      '-S', '--swarming', metavar='URL',
      default=os.environ.get('SWARMING_SERVER', ''),
      help='Swarming server to use')
  filtering = optparse.OptionGroup(parser, 'Filtering')
  filtering.add_option(
      '--start', default=jan_first.strftime('%Y-%m-%d'),
      help='Starting date in UTC; defaults to start of year: %default')
  filtering.add_option(
      '--end', default=next_day.strftime('%Y-%m-%d'),
      help='End date in UTC; defaults to tomorrow: %default')
  filtering.add_option(
      '--state', default='ALL', type='choice', choices=STATES,
      help='State to filter on. Values are: %s' % ', '.join(STATES))
  filtering.add_option(
      '--tags', action='append', default=[], help='Tags to filter on')
  parser.add_option_group(filtering)
  presentation = optparse.OptionGroup(parser, 'Presentation')
  presentation.add_option(
      '--dimensions', action='store_true', help='Show the dimensions')
  presentation.add_option(
      '--daily-count', action='store_true',
      help='Show the daily count in raw number instead of histogram')
  parser.add_option_group(presentation)
  parser.add_option(
      '--json', default='counts.json',
      help='File containing raw data; default: %default')
  parser.add_option(
      '-v', '--verbose', action='count', default=0, help='Log')
  options, args = parser.parse_args()

  if args:
    parser.error('Unsupported argument %s' % args)
  level = logging.DEBUG if options.verbose else logging.ERROR
  logging.basicConfig(level=level)
  start = parse_time_option(options.start)
  end = parse_time_option(options.end)
  print('From %s (%d) to %s (%d)' % (
      start, int((start - _EPOCH).total_seconds()),
      end, int((end - _EPOCH).total_seconds())))
  if options.swarming:
    # Fresh fetch from the server; cache the raw payload on disk.
    fetcher = fetch_tasks if options.dimensions else fetch_counts
    data = fetcher(options.swarming, start, end, options.state, options.tags)
    with open(options.json, 'wb') as f:
      json.dump(data, f)
  elif not os.path.isfile(options.json):
    parser.error('--swarming is required.')
  else:
    # No server given; reuse the previously cached payload.
    with open(options.json, 'rb') as f:
      data = json.load(f)

  print('')
  if options.dimensions:
    present_dimensions(data, options.daily_count)
  else:
    present_counts(data, options.daily_count)
  return 0
# Esempio n. 21
# 0
def main():
    """Load tests an Isolate server by sending and receiving random items.

    Generates random items of roughly --mid-size bytes each and pushes them
    through the isolate server storage API (skipped with --dry-run), until
    either --items items were queued or approximately --max-size bytes were
    queued, then prints a histogram of the results and optionally dumps them
    to a JSON file.

    Returns:
        0 as the process exit code.
    """
    colorama.init()

    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-I',
                      '--isolate-server',
                      metavar='URL',
                      default='',
                      help='Isolate server to use')
    parser.add_option('--namespace',
                      default='temporary%d-gzip' % time.time(),
                      metavar='XX',
                      help='Namespace to use on the server, default: %default')
    parser.add_option('--threads',
                      type='int',
                      default=16,
                      metavar='N',
                      help='Parallel worker threads to use, default:%default')
    graph.unit_option(parser,
                      '--items',
                      default=0,
                      help='Number of items to upload')
    graph.unit_option(parser,
                      '--max-size',
                      default=0,
                      help='Loop until this amount of data was transferred')
    graph.unit_option(parser,
                      '--mid-size',
                      default=100 * 1024,
                      help='Rough average size of each item, default:%default')
    parser.add_option('--columns',
                      type='int',
                      default=graph.get_console_width(),
                      metavar='N',
                      help='For histogram display, default:%default')
    parser.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('--dry-run',
                      action='store_true',
                      help='Do not send anything')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enable logging')
    options, args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    # --items and --max-size are mutually exclusive stop conditions; exactly
    # one must be set (non-zero).
    if bool(options.max_size) == bool(options.items):
        parser.error(
            'Use one of --max-size or --items.\n'
            '  Use --max-size if you want to run it until NN bytes where '
            'transfered.\n'
            '  Otherwise use --items to run it for NN items.')
    if not options.dry_run:
        options.isolate_server = options.isolate_server.rstrip('/')
        if not options.isolate_server:
            parser.error('--isolate-server is required.')

    print(' - Using %d thread,  items=%d,  max-size=%d,  mid-size=%d' %
          (options.threads, options.items, options.max_size, options.mid_size))
    if options.dry_run:
        print(' - %sDRY RUN MODE%s' %
              (colorama.Fore.GREEN, colorama.Fore.RESET))

    start = time.time()

    # Pre-generated pool of random data; presumably amortizes the random
    # generation cost outside the timed loop — TODO confirm in Randomness().
    random_pool = Randomness()
    print(' - Generated pool after %.1fs' % (time.time() - start))

    columns = [('index', 0), ('data', 0), ('size', options.items)]
    progress = Progress(columns)
    api = isolateserver.get_storage_api(options.isolate_server,
                                        options.namespace)
    # do_item(size) performs one send/receive round-trip for a random item of
    # that size; all other arguments are bound here.
    do_item = functools.partial(
        send_and_receive, random_pool, options.dry_run,
        isolateserver.is_namespace_with_compression(options.namespace), api,
        progress)

    # TODO(maruel): Handle Ctrl-C should:
    # - Stop adding tasks.
    # - Stop scheduling tasks in ThreadPool.
    # - Wait for the remaining ungoing tasks to complete.
    # - Still print details and write the json file.
    with threading_utils.ThreadPoolWithProgress(progress, options.threads,
                                                options.threads, 0) as pool:
        if options.items:
            for _ in xrange(options.items):
                pool.add_task(0, do_item, gen_size(options.mid_size))
                progress.print_update()
        elif options.max_size:
            # This one is approximate.
            total = 0
            while True:
                size = gen_size(options.mid_size)
                progress.update_item('', size=1)
                progress.print_update()
                pool.add_task(0, do_item, size)
                total += size
                if total >= options.max_size:
                    break
        results = sorted(pool.join())

    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
# Esempio n. 22
# 0
        parser.error(
            'Must pass one python script to run. Use --help for more details')

    # 1. Query the bots list.
    bots = get_bot_list(options.swarming, options.dimensions)
    print('Found %d bots to process' % len(bots))
    if not bots:
        return 1

    # 2. Archive the script to run.
    isolated_hash = archive(options.isolate_server, args[0])
    print('Running %s' % isolated_hash)

    # 3. Trigger the tasks.
    name = os.path.basename(args[0])
    if options.serial:
        return run_serial(options.swarming, options.isolate_server,
                          str(options.priority), str(options.deadline),
                          options.repeat, isolated_hash, name, bots)

    return run_parallel(options.swarming, options.isolate_server,
                        str(options.priority), str(options.deadline),
                        options.repeat, isolated_hash, name, bots)


if __name__ == '__main__':
    # Normalize stream encoding and disable buffering before running so that
    # progress output is emitted promptly, then enable cross-platform ANSI
    # colors via colorama.
    fix_encoding.fix_encoding()
    tools.disable_buffering()
    colorama.init()
    sys.exit(main())
def main():
    """Load tests a Swarming server by triggering sleeping tasks at a fixed rate.

    Sends --send-rate tasks per second for --duration seconds (bounded by
    --concurrent in-flight requests). Each task fakes work by sleeping --sleep
    seconds and emitting --output-size bytes of stdout. Results are collected
    incrementally, dumped to --dump after each completion when requested, and
    summarized as a histogram.

    Returns:
        0 as the process exit code.
    """
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option("-S", "--swarming", metavar="URL", default="", help="Swarming server to use")
    swarming.add_filter_options(parser)
    # Matches the dimensions declared by the fake bots in
    # swarming_load_test_bot so the triggered tasks land on them.
    parser.set_defaults(dimensions=[("os", swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, "Load generated")
    group.add_option(
        "-s",
        "--send-rate",
        type="float",
        default=16.0,
        metavar="RATE",
        help="Rate (item/s) of sending requests as a float, default: %default",
    )
    group.add_option(
        "-D",
        "--duration",
        type="float",
        default=60.0,
        metavar="N",
        help="Duration (s) of the sending phase of the load test, " "default: %default",
    )
    group.add_option(
        "-m",
        "--concurrent",
        type="int",
        default=200,
        metavar="N",
        help="Maximum concurrent on-going requests, default: %default",
    )
    group.add_option(
        "-t",
        "--timeout",
        type="float",
        default=15 * 60.0,
        metavar="N",
        help="Task expiration and timeout to get results, the task itself will "
        "have %ds less than the value provided. Default: %%default" % TIMEOUT_OVERHEAD,
    )
    group.add_option(
        "-o", "--output-size", type="int", default=100, metavar="N", help="Bytes sent to stdout, default: %default"
    )
    group.add_option(
        "--sleep",
        type="int",
        default=60,
        metavar="N",
        help="Amount of time the bot should sleep, e.g. faking work, " "default: %default",
    )
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Display options")
    group.add_option(
        "--columns",
        type="int",
        default=graph.get_console_width(),
        metavar="N",
        help="For histogram display, default:%default",
    )
    group.add_option(
        "--buckets",
        type="int",
        default=20,
        metavar="N",
        help="Number of buckets for histogram display, default:%default",
    )
    parser.add_option_group(group)

    parser.add_option("--dump", metavar="FOO.JSON", help="Dumps to json file")
    parser.add_option("-v", "--verbose", action="store_true", help="Enables logging")

    options, args = parser.parse_args()
    logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error("Unsupported args: %s" % args)
    options.swarming = options.swarming.rstrip("/")
    if not options.swarming:
        parser.error("--swarming is required.")
    if options.duration <= 0:
        parser.error("Needs --duration > 0. 0.01 is a valid value.")
    swarming.process_filter_options(parser, options)

    total = int(round(options.send_rate * options.duration))
    print(
        "Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; "
        "total %d" % (options.send_rate, options.duration, options.concurrent, options.timeout, total)
    )
    print("[processing/processed/todo]")

    # This is used so there's no clash between runs and actual real usage.
    unique = "".join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [("processing", 0), ("processed", 0), ("todo", 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    # Single priority level; up to options.concurrent worker threads.
    with threading_utils.ThreadPoolWithProgress(progress, 1, options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                # Catch up to the target rate: trigger however many tasks
                # should have been sent by this point in time.
                should_have_triggered_so_far = int(round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(
                        0,
                        trigger_task,
                        options.swarming,
                        options.dimensions,
                        options.sleep,
                        options.output_size,
                        progress,
                        unique,
                        options.timeout,
                        index,
                    )
                    progress.update_item("", todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item("Getting results for on-going tasks.", raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive but it's useful in the case where some tasks
                # hangs, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + ".old")
                    with open(options.dump, "wb") as f:
                        json.dump(results, f, separators=(",", ":"))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item("Got Ctrl-C. Aborted %d unsent tasks." % aborted, raw=True, todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print("")
    print(" - Took %.1fs." % (time.time() - start))
    print("")
    print_results(results, options.columns, options.buckets)
    return 0