def run_swarming_tasks_parallel(swarming_server, isolate_server,
                                extra_trigger_args, tasks):
    """Triggers swarming tasks in parallel and gets results.

  This is done by using one thread per task and shelling out swarming.py.

  Arguments:
    extra_trigger_args: list of additional flags to pass down to
        'swarming.py trigger'
    tasks: list of tuple(task_name, isolated_hash, dimensions, caches, env)
        where dimension are --dimension flags to provide when triggering the
        task and caches are --named-cache flags.

  Yields:
    tuple(name, dimensions, stdout) for the tasks that failed.
  """
    runs = len(tasks)
    # trigger + collect
    total = 2 * runs
    failed_tasks = []
    progress = threading_utils.Progress([('index', 0), ('size', total)])
    progress.use_cr_only = False
    start = time.time()
    with threading_utils.ThreadPoolWithProgress(progress, runs, runs,
                                                total) as pool:
        runner = Runner(swarming_server, isolate_server, pool.add_task,
                        progress, extra_trigger_args)

        for task_name, isolated_hash, dimensions, caches, env in tasks:
            pool.add_task(0, runner.trigger, task_name, isolated_hash,
                          dimensions, caches, env)

        # Runner.collect() only returns task failures.
        for failed_task in pool.iter_results():
            task_name, dimensions, stdout = failed_task
            yield task_name, dimensions, stdout
            failed_tasks.append(task_name)

    duration = time.time() - start
    print('\nCompleted in %3.2fs' % duration)
    if failed_tasks:
        print('Detected the following failures:')
        for task in sorted(failed_tasks):
            print('  %s' % task)
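
# A minimal usage sketch for run_swarming_tasks_parallel(); the server URLs,
# isolated hash and dimension values below are hypothetical placeholders:
#
#   tasks = [('base_unittests', '1234567890abcdef',
#             ['--dimension', 'os', 'Linux'], [], {})]
#   for name, dimensions, stdout in run_swarming_tasks_parallel(
#       'https://swarming.example.com', 'https://isolate.example.com',
#       [], tasks):
#     print('Failed: %s' % name)
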
def run_swarm_tests_on_swarm(oses, tests, logs, isolate_server, swarm_server):
    runs = len(tests) * len(oses)
    total = 3 * runs  # archive + trigger + collect
    columns = [('index', 0), ('size', total)]
    progress = threading_utils.Progress(columns)
    progress.use_cr_only = False
    tempdir = tempfile.mkdtemp(prefix='swarm_client_tests')
    try:
        with threading_utils.ThreadPoolWithProgress(progress, runs, runs,
                                                    total) as pool:
            start = time.time()
            runner = Runner(isolate_server, swarm_server, pool.add_task,
                            progress, tempdir)
            for test in tests:
                for platform in oses:
                    pool.add_task(0, runner.archive, test, platform)

            failed_tests = pool.join()
            duration = time.time() - start
            print('')
    finally:
        shutil.rmtree(tempdir)

    if logs:
        os.makedirs(logs)
        for test, platform, stdout in failed_tests:
            name = '%s_%s' % (platform, os.path.basename(test))
            with open(os.path.join(logs, name + '.log'), 'wb') as f:
                f.write(stdout)

    print('Completed in %3.2fs' % duration)
    if failed_tests:
        failed_tests_per_os = {}
        for test, platform, _ in failed_tests:
            failed_tests_per_os.setdefault(test, []).append(platform)
        print('Detected the following failures:')
        for test, platforms in failed_tests_per_os.items():
            print('  %s on %s' % (test, ', '.join(sorted(platforms))))
    return bool(failed_tests)
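
A minimal invocation sketch, assuming the OS list, test paths and server URLs
are supplied by the caller (all values below are hypothetical placeholders):

import sys

ret = run_swarm_tests_on_swarm(
    oses=['Linux', 'Windows'],               # placeholder platforms
    tests=['tests/base_unittests.isolate'],  # placeholder test list
    logs='/tmp/swarm_logs',                  # where failure logs are written
    isolate_server='https://isolate.example.com',
    swarm_server='https://swarming.example.com')
sys.exit(int(ret))  # the function returns True when any test failed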
Example #3
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
  """Traces each test cases individually but all in parallel."""
  assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir), cwd_dir

  if not test_cases:
    return []

  # Resolve any symlink.
  cwd_dir = os.path.realpath(cwd_dir)
  assert os.path.isdir(cwd_dir)

  api = trace_inputs.get_api()
  api.clean_trace(logname)

  jobs = jobs or multiprocessing.cpu_count()
  # Try to do black magic here by guessing a few of the run_test_cases.py
  # flags. It's cheesy but it works.
  for i, v in enumerate(cmd):
    if v.endswith('run_test_cases.py'):
      # Found it. Process the arguments here.
      _, options, _ = run_test_cases.process_args(cmd[i:])
      # Always override with the lowest value.
      jobs = min(options.jobs, jobs)
      break

  columns = [('index', 0), ('size', len(test_cases))]
  progress = threading_utils.Progress(columns)
  with threading_utils.ThreadPoolWithProgress(
      progress, jobs, jobs, len(test_cases)) as pool:
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, cmd, cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(0, function, test_case)

      results = pool.join()
  print('')
  return results
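
A hedged usage sketch; the traced command, working directory and test case
names below are placeholders:

import os

results = trace_test_cases(
    cmd=['python', 'run_test_cases.py', 'out/base_unittests'],
    cwd_dir=os.path.abspath('.'),  # must be an absolute, existing directory
    test_cases=['Foo.Bar', 'Foo.Baz'],
    jobs=None,                     # falls back to multiprocessing.cpu_count()
    logname='trace.log')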
Example #4
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generation')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option(
        '-t',
        '--timeout',
        type='float',
        default=15 * 60.,
        metavar='N',
        help='Task expiration and timeout to get results; the task itself '
        'gets %ds less than the value provided. Default: %%default' %
        TIMEOUT_OVERHEAD)
    group.add_option('-o',
                     '--output-size',
                     type='int',
                     default=100,
                     metavar='N',
                     help='Bytes sent to stdout, default: %default')
    group.add_option(
        '--sleep',
        type='int',
        default=60,
        metavar='N',
        help='Amount of time the bot should sleep, e.g. faking work, '
        'default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = int(round(options.send_rate * options.duration))
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between test runs and real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(
                    round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, options.sleep,
                                  options.output_size, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive, but it's useful in the case where
                # some tasks hang, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + '.old')
                    with open(options.dump, 'wb') as f:
                        json.dump(results, f, separators=(',', ':'))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    return 0
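
The send loop above paces task injection against wall-clock time instead of
sleeping a fixed interval per task, so slow iterations are compensated by
catch-up bursts and the average rate stays at --send-rate. A standalone sketch
of that pacing pattern, with a placeholder do_work callback:

import time

def paced_loop(rate, duration, do_work):
    # Calls do_work(i) about 'rate' times per second for 'duration' seconds.
    start = time.time()
    sent = 0
    while True:
        elapsed = time.time() - start
        if elapsed > duration:
            break
        # How many calls should have happened by now at the target rate.
        target = int(round(elapsed * rate))
        while sent < target:
            do_work(sent)
            sent += 1
        time.sleep(0.01)
    return sent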
Example #5
def main():
  colorama.init()
  parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
  parser.add_option(
      '-S', '--swarming',
      metavar='URL', default='',
      help='Swarming server to use')
  swarming.add_filter_options(parser)
  # Use improbable values to reduce the chance of interfering with real slaves.
  parser.set_defaults(
      dimensions=[
        ('bits', '36'),
        ('machine', os.uname()[4] + '-experimental'),
        ('os', OS_NAME),
      ])

  group = optparse.OptionGroup(parser, 'Load generation')
  group.add_option(
      '--slaves', type='int', default=300, metavar='N',
      help='Number of swarm bot slaves, default: %default')
  group.add_option(
      '-c', '--consume', type='float', default=60., metavar='N',
      help='Duration (s) for consuming a request, default: %default')
  parser.add_option_group(group)

  group = optparse.OptionGroup(parser, 'Display options')
  group.add_option(
      '--columns', type='int', default=graph.get_console_width(), metavar='N',
      help='For histogram display, default:%default')
  group.add_option(
      '--buckets', type='int', default=20, metavar='N',
      help='Number of buckets for histogram display, default:%default')
  parser.add_option_group(group)

  parser.add_option(
      '--dump', metavar='FOO.JSON', help='Dumps to json file')
  parser.add_option(
      '-v', '--verbose', action='store_true', help='Enables logging')

  options, args = parser.parse_args()
  logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
  if args:
    parser.error('Unsupported args: %s' % args)
  options.swarming = options.swarming.rstrip('/')
  if not options.swarming:
    parser.error('--swarming is required.')
  if options.consume <= 0:
    parser.error('Needs --consume > 0. 0.01 is a valid value.')
  swarming.process_filter_options(parser, options)

  print(
      'Running %d slaves, each task lasting %.1fs' % (
        options.slaves, options.consume))
  print('Ctrl-C to exit.')
  print('[processing/processed/bots]')
  columns = [('processing', 0), ('processed', 0), ('bots', 0)]
  progress = threading_utils.Progress(columns)
  events = Queue.Queue()
  start = time.time()
  kill_event = threading.Event()
  swarm_bot_version_hash = calculate_version(
      options.swarming + '/get_slave_code')
  slaves = [
    FakeSwarmBot(
      options.swarming, options.dimensions, swarm_bot_version_hash, i, progress,
      options.consume, events, kill_event)
    for i in range(options.slaves)
  ]
  try:
    # Wait for all the slaves to come alive.
    while not all(s.is_alive() for s in slaves):
      time.sleep(0.01)
    progress.update_item('Ready to run')
    while slaves:
      progress.print_update()
      time.sleep(0.01)
      # The slaves could be told to die.
      slaves = [s for s in slaves if s.is_alive()]
  except KeyboardInterrupt:
    kill_event.set()

  progress.update_item('Waiting for slaves to quit.', raw=True)
  progress.update_item('')
  while slaves:
    progress.print_update()
    slaves = [s for s in slaves if s.is_alive()]
  # At this point, progress is not used anymore.
  print('')
  print('Ran for %.1fs.' % (time.time() - start))
  print('')
  results = list(events.queue)  # Copy the deque into a JSON-serializable list.
  print_results(results, options.columns, options.buckets)
  if options.dump:
    with open(options.dump, 'w') as f:
      json.dump(results, f, separators=(',', ':'))
  return 0
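
Shutdown here is cooperative: Ctrl-C sets a shared threading.Event that every
bot polls, and the main loop keeps draining the slave list until all threads
have exited. A minimal standalone sketch of that shape (the worker body is a
placeholder for what FakeSwarmBot actually does):

import threading
import time

def worker(kill_event):
    # Fake periodic work until asked to quit.
    while not kill_event.is_set():
        time.sleep(0.1)

def run_until_interrupted(count):
    kill_event = threading.Event()
    threads = [threading.Thread(target=worker, args=(kill_event,))
               for _ in range(count)]
    for t in threads:
        t.start()
    try:
        # Runs until Ctrl-C, mirroring the 'Ctrl-C to exit.' design above.
        while any(t.is_alive() for t in threads):
            time.sleep(0.01)
    except KeyboardInterrupt:
        kill_event.set()
    for t in threads:
        t.join()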
Example #6

def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generation')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option('-t',
                     '--timeout',
                     type='float',
                     default=3600.,
                     metavar='N',
                     help='Timeout to get results, default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = options.send_rate * options.duration
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between test runs and real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(duration *
                                                   options.send_rate)
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
        finally:
            # TODO(maruel): We could give up on collecting results for the on-going
            # tasks but that would need to be optional.
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            results = sorted(pool.join())
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
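
Both load-test clients share the same interrupt-handling shape: on Ctrl-C,
drop whatever has not been sent yet but still collect results for tasks
already in flight. threading_utils is internal to the swarming client, so
here is a hedged sketch of the same shape using the standard library's
concurrent.futures instead:

import concurrent.futures

def submit_with_graceful_abort(items, do_work, max_workers=8):
    # Submits one task per item; on Ctrl-C, cancels tasks that have not
    # started yet while letting in-flight ones finish and report results.
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
        try:
            for item in items:
                futures.append(pool.submit(do_work, item))
        except KeyboardInterrupt:
            aborted = sum(1 for f in futures if f.cancel())
            print('Got Ctrl-C. Aborted %d unsent tasks.' % aborted)
    # The context manager waited for in-flight tasks, so results are ready.
    return sorted(f.result() for f in futures if not f.cancelled())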