def main():
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '-s', '--send-rate', type='float', default=16., metavar='RATE',
      help='Rate (item/s) of sending requests as a float, default: %default')
  group.add_option(
      '-D', '--duration', type='float', default=60., metavar='N',
      help='Duration (s) of the sending phase of the load test, '
           'default: %default')
  group.add_option(
      '-m', '--concurrent', type='int', default=200, metavar='N',
      help='Maximum concurrent on-going requests, default: %default')
  group.add_option(
      '-t', '--timeout', type='float', default=3600., metavar='N',
      help='Timeout to get results, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.duration <= 0:
    parser.error('Needs --duration > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  total = options.send_rate * options.duration
  print(
      'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
      'total %d' %
        (options.send_rate, options.duration, options.concurrent,
        options.timeout, total))
  print('[processing/processed/todo]')

  # This is used so there's no clash between runs and actual real usage.
  unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
  columns = [('processing', 0), ('processed', 0), ('todo', 0)]
  progress = threading_utils.Progress(columns)
  index = 0
  with threading_utils.ThreadPoolWithProgress(
      progress, 1, options.concurrent, 0) as pool:
    try:
      start = time.time()
      while True:
        duration = time.time() - start
        if duration > options.duration:
          break
        should_have_triggered_so_far = int(duration * options.send_rate)
        while index < should_have_triggered_so_far:
          pool.add_task(
              0,
              trigger_task,
              options.swarming,
              options.dimensions,
              progress,
              unique,
              options.timeout,
              index)
          progress.update_item('', todo=1)
          index += 1
          progress.print_update()
        time.sleep(0.01)
    except KeyboardInterrupt:
      aborted = pool.abort()
      progress.update_item(
          'Got Ctrl-C. Aborted %d unsent tasks.' % aborted,
          raw=True,
          todo=-aborted)
      progress.print_update()
    finally:
      # TODO(maruel): We could give up on collecting results for the on-going
      # tasks but that would need to be optional.
      progress.update_item('Getting results for on-going tasks.', raw=True)
      results = sorted(pool.join())
  progress.print_update()
  # At this point, progress is not used anymore.
  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
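
These excerpts call a trigger_task() helper that is not shown. Below is a hypothetical stand-in matching the call in the first example, assuming the helper returns the observed round-trip duration in seconds so that sorted(pool.join()) yields the timings print_results() consumes; the real helper would post a task request to the Swarming server and poll for its result instead of sleeping.

import random
import time


def trigger_task(swarming_url, dimensions, progress, unique, timeout, index):
  """Hypothetical stand-in: pretends to trigger one task and await its result.

  The real helper uses swarming_url and dimensions to build the task request;
  here they are unused and the round trip is simulated.
  """
  name = 'load-test-%s-%d' % (unique, index)
  progress.update_item('%s: triggered' % name, processing=1, todo=-1)
  start = time.time()
  # Simulated trigger-plus-poll latency, capped by the caller's timeout.
  time.sleep(min(random.expovariate(1.), timeout))
  progress.update_item('%s: done' % name, processing=-1, processed=1)
  return time.time() - start
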
def main():
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  parser.add_option(
      '--suffix', metavar='NAME', default='', help='Bot suffix name to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interfering with real slaves.
  parser.set_defaults(
      dimensions=[
        ('cpu', ['arm36']),
        ('hostname', socket.getfqdn()),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
        options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
  hostname = get_hostname()
  if options.suffix:
    hostname += '-' + options.suffix
  slaves = [
    FakeSwarmBot(
      options.swarming, options.dimensions, swarm_bot_version_hash, hostname, i,
      progress, options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
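
FakeSwarmBot is likewise not shown in these excerpts. A minimal hypothetical skeleton matching the constructor call above, assuming each bot is a daemon thread that fakes 'consume' seconds of work per iteration, pushes the measured duration onto the events queue, and exits once kill_event is set; the real bot also handshakes with the server using swarming_url, dimensions and version_hash.

import threading
import time


class FakeSwarmBot(threading.Thread):
  """Hypothetical skeleton of one fake bot; the load test spawns many."""

  def __init__(self, swarming_url, dimensions, version_hash, hostname, index,
               progress, consume, events, kill_event):
    super(FakeSwarmBot, self).__init__()
    self.daemon = True
    self._progress = progress
    self._consume = consume
    self._events = events
    self._kill_event = kill_event
    self._id = '%s-%d' % (hostname, index)
    self._progress.update_item('%s: ready' % self._id, bots=1)
    self.start()

  def run(self):
    while not self._kill_event.is_set():
      start = time.time()
      # Stand-in for polling the server for a task assignment.
      self._progress.update_item('%s: processing' % self._id, processing=1)
      time.sleep(self._consume)
      self._progress.update_item(
          '%s: processed' % self._id, processing=-1, processed=1)
      self._events.put(time.time() - start)
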
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option(
        '-t',
        '--timeout',
        type='float',
        default=15 * 60.,
        metavar='N',
        help='Task expiration and timeout to get results, the task itself will '
        'have %ds less than the value provided. Default: %%default' %
        TIMEOUT_OVERHEAD)
    group.add_option('-o',
                     '--output-size',
                     type='int',
                     default=100,
                     metavar='N',
                     help='Bytes sent to stdout, default: %default')
    group.add_option(
        '--sleep',
        type='int',
        default=60,
        metavar='N',
        help='Amount of time the bot should sleep, e.g. faking work, '
        'default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = int(round(options.send_rate * options.duration))
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(
                    round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, options.sleep,
                                  options.output_size, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive, but it's useful when some tasks
                # hang, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + '.old')
                    with open(options.dump, 'wb') as f:
                        json.dump(results, f, separators=(',', ':'))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    return 0
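
The per-result loop above rewrites the whole dump file after every completed task so that partial data survives a hung task. The same checkpointing can be factored into a small helper; this is purely a refactoring sketch of the code above, with dump_checkpoint as a hypothetical name:

import json
import os


def dump_checkpoint(path, results):
    """Saves the sorted results so far, keeping the previous snapshot as
    'path.old' in case the rewrite itself is interrupted."""
    results.sort()
    if os.path.exists(path):
        os.rename(path, path + '.old')
    with open(path, 'wb') as f:
        json.dump(results, f, separators=(',', ':'))
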
def main():
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interfering with real slaves.
  parser.set_defaults(
      dimensions=[
        ('bits', '36'),
        ('machine', os.uname()[4] + '-experimental'),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generated')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
        options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(
      options.swarming + '/get_slave_code')
  slaves = [
    FakeSwarmBot(
      options.swarming, options.dimensions, swarm_bot_version_hash, i, progress,
      options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  results = list(events.queue)
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option("-S", "--swarming", metavar="URL", default="", help="Swarming server to use")
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[("os", swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, "Load generated")
    group.add_option(
        "-s",
        "--send-rate",
        type="float",
        default=16.0,
        metavar="RATE",
        help="Rate (item/s) of sending requests as a float, default: %default",
    )
    group.add_option(
        "-D",
        "--duration",
        type="float",
        default=60.0,
        metavar="N",
        help="Duration (s) of the sending phase of the load test, " "default: %default",
    )
    group.add_option(
        "-m",
        "--concurrent",
        type="int",
        default=200,
        metavar="N",
        help="Maximum concurrent on-going requests, default: %default",
    )
    group.add_option(
        "-t",
        "--timeout",
        type="float",
        default=15 * 60.0,
        metavar="N",
        help="Task expiration and timeout to get results, the task itself will "
        "have %ds less than the value provided. Default: %%default" % TIMEOUT_OVERHEAD,
    )
    group.add_option(
        "-o", "--output-size", type="int", default=100, metavar="N", help="Bytes sent to stdout, default: %default"
    )
    group.add_option(
        "--sleep",
        type="int",
        default=60,
        metavar="N",
        help="Amount of time the bot should sleep, e.g. faking work, " "default: %default",
    )
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, "Display options")
    group.add_option(
        "--columns",
        type="int",
        default=graph.get_console_width(),
        metavar="N",
        help="For histogram display, default:%default",
    )
    group.add_option(
        "--buckets",
        type="int",
        default=20,
        metavar="N",
        help="Number of buckets for histogram display, default:%default",
    )
    parser.add_option_group(group)

    parser.add_option("--dump", metavar="FOO.JSON", help="Dumps to json file")
    parser.add_option("-v", "--verbose", action="store_true", help="Enables logging")

    options, args = parser.parse_args()
    logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error("Unsupported args: %s" % args)
    options.swarming = options.swarming.rstrip("/")
    if not options.swarming:
        parser.error("--swarming is required.")
    if options.duration <= 0:
        parser.error("Needs --duration > 0. 0.01 is a valid value.")
    swarming.process_filter_options(parser, options)

    total = int(round(options.send_rate * options.duration))
    print(
        "Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; "
        "total %d" % (options.send_rate, options.duration, options.concurrent, options.timeout, total)
    )
    print("[processing/processed/todo]")

    # This is used so there's no clash between runs and actual real usage.
    unique = "".join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [("processing", 0), ("processed", 0), ("todo", 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    with threading_utils.ThreadPoolWithProgress(progress, 1, options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(
                        0,
                        trigger_task,
                        options.swarming,
                        options.dimensions,
                        options.sleep,
                        options.output_size,
                        progress,
                        unique,
                        options.timeout,
                        index,
                    )
                    progress.update_item("", todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item("Getting results for on-going tasks.", raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive, but it's useful when some tasks
                # hang, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + ".old")
                    with open(options.dump, "wb") as f:
                        json.dump(results, f, separators=(",", ":"))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item("Got Ctrl-C. Aborted %d unsent tasks." % aborted, raw=True, todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print("")
    print(" - Took %.1fs." % (time.time() - start))
    print("")
    print_results(results, options.columns, options.buckets)
    return 0
def main():
    colorama.init()

    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-I',
                      '--isolate-server',
                      metavar='URL',
                      default='',
                      help='Isolate server to use')
    parser.add_option('--namespace',
                      default='temporary%d-gzip' % time.time(),
                      metavar='XX',
                      help='Namespace to use on the server, default: %default')
    parser.add_option('--threads',
                      type='int',
                      default=16,
                      metavar='N',
                      help='Parallel worker threads to use, default:%default')
    graph.unit_option(parser,
                      '--items',
                      default=0,
                      help='Number of items to upload')
    graph.unit_option(parser,
                      '--max-size',
                      default=0,
                      help='Loop until this amount of data has been transferred')
    graph.unit_option(parser,
                      '--mid-size',
                      default=100 * 1024,
                      help='Rough average size of each item, default:%default')
    parser.add_option('--columns',
                      type='int',
                      default=graph.get_console_width(),
                      metavar='N',
                      help='For histogram display, default:%default')
    parser.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('--dry-run',
                      action='store_true',
                      help='Do not send anything')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enable logging')
    options, args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    if bool(options.max_size) == bool(options.items):
        parser.error(
            'Use one of --max-size or --items.\n'
            '  Use --max-size if you want to run it until NN bytes were '
            'transferred.\n'
            '  Otherwise use --items to run it for NN items.')
    if not options.dry_run:
        options.isolate_server = options.isolate_server.rstrip('/')
        if not options.isolate_server:
            parser.error('--isolate-server is required.')

    print(' - Using %d threads,  items=%d,  max-size=%d,  mid-size=%d' %
          (options.threads, options.items, options.max_size, options.mid_size))
    if options.dry_run:
        print(' - %sDRY RUN MODE%s' %
              (colorama.Fore.GREEN, colorama.Fore.RESET))

    start = time.time()

    random_pool = Randomness()
    print(' - Generated pool after %.1fs' % (time.time() - start))

    columns = [('index', 0), ('data', 0), ('size', options.items)]
    progress = Progress(columns)
    api = isolateserver.get_storage_api(options.isolate_server,
                                        options.namespace)
    do_item = functools.partial(
        send_and_receive, random_pool, options.dry_run,
        isolateserver.is_namespace_with_compression(options.namespace), api,
        progress)

    # TODO(maruel): Handle Ctrl-C should:
    # - Stop adding tasks.
    # - Stop scheduling tasks in ThreadPool.
    # - Wait for the remaining ongoing tasks to complete.
    # - Still print details and write the json file.
    with threading_utils.ThreadPoolWithProgress(progress, options.threads,
                                                options.threads, 0) as pool:
        if options.items:
            for _ in xrange(options.items):
                pool.add_task(0, do_item, gen_size(options.mid_size))
                progress.print_update()
        elif options.max_size:
            # This one is approximate.
            total = 0
            while True:
                size = gen_size(options.mid_size)
                progress.update_item('', size=1)
                progress.print_update()
                pool.add_task(0, do_item, size)
                total += size
                if total >= options.max_size:
                    break
        results = sorted(pool.join())

    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
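
gen_size() is referenced but never defined in these excerpts. A plausible stand-in, assuming payload sizes should be spread around --mid-size on a log-normal curve so the resulting histogram is non-trivial (the real distribution may well differ):

import math
import random


def gen_size(mid_size):
    """Hypothetical: returns a pseudo-random payload size roughly centered
    on mid_size, clamped to at least one byte."""
    return max(1, int(random.lognormvariate(math.log(mid_size), 0.5)))
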
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option('-t',
                     '--timeout',
                     type='float',
                     default=3600.,
                     metavar='N',
                     help='Timeout to get results, default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = options.send_rate * options.duration
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(duration *
                                                   options.send_rate)
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
        finally:
            # TODO(maruel): We could give up on collecting results for the on-going
            # tasks but that would need to be optional.
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            results = sorted(pool.join())
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
def main():
  colorama.init()

  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-I', '--isolate-server',
      metavar='URL', default='',
      help='Isolate server to use')
  parser.add_option(
      '--namespace', default='temporary%d-gzip' % time.time(), metavar='XX',
      help='Namespace to use on the server, default: %default')
  parser.add_option(
      '--threads', type='int', default=16, metavar='N',
      help='Parallel worker threads to use, default:%default')

  data_group = optparse.OptionGroup(parser, 'Amount of data')
  graph.unit_option(
      data_group, '--items', default=0, help='Number of items to upload')
  graph.unit_option(
      data_group, '--max-size', default=0,
      help='Loop until this amount of data has been transferred')
  graph.unit_option(
      data_group, '--mid-size', default=100*1024,
      help='Rough average size of each item, default:%default')
  parser.add_option_group(data_group)

  ui_group = optparse.OptionGroup(parser, 'Result histogram')
  ui_group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='Width of histogram, default:%default')
  ui_group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of histogram buckets, default:%default')
  parser.add_option_group(ui_group)

  log_group = optparse.OptionGroup(parser, 'Logging')
  log_group.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  log_group.add_option(
      '-v', '--verbose', action='store_true', help='Enable logging')
  parser.add_option_group(log_group)

  options, args = parser.parse_args()

  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  if bool(options.max_size) == bool(options.items):
    parser.error(
        'Use one of --max-size or --items.\n'
        '  Use --max-size if you want to run it until NN bytes were '
        'transferred.\n'
        '  Otherwise use --items to run it for NN items.')
  options.isolate_server = options.isolate_server.rstrip('/')
  if not options.isolate_server:
    parser.error('--isolate-server is required.')

  print(
      ' - Using %d threads,  items=%d,  max-size=%d,  mid-size=%d' % (
      options.threads, options.items, options.max_size, options.mid_size))

  start = time.time()

  random_pool = Randomness()
  print(' - Generated pool after %.1fs' % (time.time() - start))

  columns = [('index', 0), ('data', 0), ('size', options.items)]
  progress = Progress(columns)
  storage = isolateserver.get_storage(options.isolate_server, options.namespace)
  do_item = functools.partial(
      send_and_receive,
      random_pool,
      storage,
      progress)

  # TODO(maruel): Handle Ctrl-C should:
  # - Stop adding tasks.
  # - Stop scheduling tasks in ThreadPool.
  # - Wait for the remaining ongoing tasks to complete.
  # - Still print details and write the json file.
  with threading_utils.ThreadPoolWithProgress(
      progress, options.threads, options.threads, 0) as pool:
    if options.items:
      for _ in xrange(options.items):
        pool.add_task(0, do_item, gen_size(options.mid_size))
        progress.print_update()
    elif options.max_size:
      # This one is approximate.
      total = 0
      while True:
        size = gen_size(options.mid_size)
        progress.update_item('', size=1)
        progress.print_update()
        pool.add_task(0, do_item, size)
        total += size
        if total >= options.max_size:
          break
    results = sorted(pool.join())

  print('')
  print(' - Took %.1fs.' % (time.time() - start))
  print('')
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',',':'))
  return 0
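
Every example ends by handing its timings to print_results(). As a closing sketch, assuming results is a flat list of durations in seconds, a minimal version printing summary statistics plus a crude text histogram (the real helper delegates rendering to graph.py and handles richer result tuples) could look like:

def print_results(results, columns, buckets):
  """Minimal sketch: summary stats plus a text histogram of durations."""
  if not results:
    print('No results.')
    return
  results = sorted(results)
  lo, hi = results[0], results[-1]
  print('Average: %.2fs  Median: %.2fs  Min: %.2fs  Max: %.2fs' % (
      sum(results) / len(results), results[len(results) // 2], lo, hi))
  step = (hi - lo) / buckets or 1
  counts = [0] * buckets
  for value in results:
    counts[min(int((value - lo) / step), buckets - 1)] += 1
  # Scale bar widths so the widest bar fits in the requested column count.
  scale = max(1., max(counts) / float(max(columns - 12, 1)))
  for i, count in enumerate(counts):
    print('%8.2fs %s' % (lo + i * step, '*' * int(count / scale)))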