Code example #1
def run_swarming_tasks_parallel(swarming_server, isolate_server,
                                extra_trigger_args, tasks):
    """Triggers swarming tasks in parallel and gets results.

  This is done by using one thread per task and shelling out swarming.py.

  Arguments:
    extra_trigger_args: list of additional flags to pass down to
        'swarming.py trigger'
    tasks: list of tuple(task_name, isolated_hash, dimensions) where dimension
        are --dimension flags to provide when triggering the task.

  Yields:
    tuple(name, dimensions, stdout) for the tasks that failed.
  """
    runs = len(tasks)
    # trigger + collect
    total = 2 * runs
    failed_tasks = []
    progress = threading_utils.Progress([('index', 0), ('size', total)])
    progress.use_cr_only = False
    start = time.time()
    with threading_utils.ThreadPoolWithProgress(progress, runs, runs,
                                                total) as pool:
        runner = Runner(swarming_server, isolate_server, pool.add_task,
                        progress, extra_trigger_args)

        for task_name, isolated_hash, dimensions in tasks:
            pool.add_task(0, runner.trigger, task_name, isolated_hash,
                          dimensions)

        # Runner.collect() only returns task failures.
        for failed_task in pool.iter_results():
            task_name, dimensions, stdout = failed_task
            yield task_name, dimensions, stdout
            failed_tasks.append(task_name)

    duration = time.time() - start
    print('\nCompleted in %3.2fs' % duration)
    if failed_tasks:
        print('Detected the following failures:')
        for task in sorted(failed_tasks):
            print('  %s' % task)
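The generator above only yields failures, so a caller simply iterates it to drive the whole trigger/collect pipeline. A minimal usage sketch follows; the server URLs, extra trigger flags, isolated hashes and dimensions are hypothetical placeholders, not values from the original code.

# Hypothetical usage sketch for run_swarming_tasks_parallel(); every literal
# below (URLs, hashes, dimensions) is a made-up placeholder.
tasks = [
    ('base_unittests', '1' * 40, ['--dimension', 'os', 'Linux']),
    ('net_unittests', '2' * 40, ['--dimension', 'os', 'Windows']),
]
for task_name, dimensions, stdout in run_swarming_tasks_parallel(
        'https://swarming.example.com', 'https://isolate.example.com',
        ['--priority', '10'], tasks):
    # Only failed tasks are yielded; dump their output as they complete.
    print('Failed: %s on %s' % (task_name, dimensions))
    print(stdout)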
Code example #2
def run_swarm_tests_on_swarm(oses, tests, logs, isolate_server, swarm_server):
    runs = len(tests) * len(oses)
    total = 3 * runs
    columns = [('index', 0), ('size', total)]
    progress = threading_utils.Progress(columns)
    progress.use_cr_only = False
    tempdir = tempfile.mkdtemp(prefix='swarm_client_tests')
    try:
        with threading_utils.ThreadPoolWithProgress(progress, runs, runs,
                                                    total) as pool:
            start = time.time()
            runner = Runner(isolate_server, swarm_server, pool.add_task,
                            progress, tempdir)
            for test in tests:
                for platform in oses:
                    pool.add_task(0, runner.archive, test, platform)

            failed_tests = pool.join()
            duration = time.time() - start
            print('')
    finally:
        shutil.rmtree(tempdir)

    if logs:
        os.makedirs(logs)
        for test, platform, stdout in failed_tests:
            name = '%s_%s' % (platform, os.path.basename(test))
            with open(os.path.join(logs, name + '.log'), 'wb') as f:
                f.write(stdout)

    print('Completed in %3.2fs' % duration)
    if failed_tests:
        failed_tests_per_os = {}
        for test, platform, _ in failed_tests:
            failed_tests_per_os.setdefault(test, []).append(platform)
        print('Detected the following failures:')
        for test, platforms in failed_tests_per_os.iteritems():
            print('  %s on %s' % (test, ', '.join(sorted(platforms))))
    return bool(failed_tests)
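The failure summary at the end of example #2 groups the failed (test, platform) pairs per test with dict.setdefault before printing them. A tiny standalone illustration of that grouping is shown below; the test names and platforms are made up.

# Standalone illustration of the dict.setdefault grouping used above; the
# (test, platform, stdout) tuples are made up.
failed = [
    ('isolate_test', 'linux', ''),
    ('isolate_test', 'win32', ''),
    ('trace_test', 'mac', ''),
]
failed_tests_per_os = {}
for test, platform, _ in failed:
    failed_tests_per_os.setdefault(test, []).append(platform)
for test, platforms in sorted(failed_tests_per_os.items()):
    print('  %s on %s' % (test, ', '.join(sorted(platforms))))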
Code example #3
def trace_test_cases(cmd, cwd_dir, test_cases, jobs, logname):
  """Traces each test cases individually but all in parallel."""
  assert os.path.isabs(cwd_dir) and os.path.isdir(cwd_dir), cwd_dir

  if not test_cases:
    return []

  # Resolve any symlink.
  cwd_dir = os.path.realpath(cwd_dir)
  assert os.path.isdir(cwd_dir)

  api = trace_inputs.get_api()
  api.clean_trace(logname)

  jobs = jobs or multiprocessing.cpu_count()
  # Try to do black magic here by guessing a few of the run_test_cases.py
  # flags. It's cheesy but it works.
  for i, v in enumerate(cmd):
    if v.endswith('run_test_cases.py'):
      # Found it. Process the arguments here.
      _, options, _ = run_test_cases.process_args(cmd[i:])
      # Always override with the lowest value.
      jobs = min(options.jobs, jobs)
      break

  columns = [('index', 0), ('size', len(test_cases))]
  progress = threading_utils.Progress(columns)
  with threading_utils.ThreadPoolWithProgress(
      progress, jobs, jobs, len(test_cases)) as pool:
    with api.get_tracer(logname) as tracer:
      function = Tracer(tracer, cmd, cwd_dir, progress).map
      for test_case in test_cases:
        pool.add_task(0, function, test_case)

      results = pool.join()
  print('')
  return results
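The examples in this section all rely on the same scheduling contract: build a Progress with named columns, open a ThreadPoolWithProgress, queue work with pool.add_task(priority, callable, *args), then either stream results with pool.iter_results() or gather them with pool.join(). The sketch below distills that contract with a trivial worker; it assumes the swarming client's threading_utils module is importable, and the worker itself is made up.

# Distilled sketch of the shared pool/progress pattern (assumes the swarming
# client's threading_utils module is on sys.path; the worker is made up).
import threading_utils

def work(item):
  # Placeholder worker; the real examples trigger or collect swarming tasks.
  return item * item

items = range(10)
columns = [('index', 0), ('size', len(items))]
progress = threading_utils.Progress(columns)
with threading_utils.ThreadPoolWithProgress(
    progress, 4, 4, len(items)) as pool:
  for item in items:
    # The examples always queue with priority 0.
    pool.add_task(0, work, item)
  results = pool.join()
print('')
print(sorted(results))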
Code example #4
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option(
        '-t',
        '--timeout',
        type='float',
        default=15 * 60.,
        metavar='N',
        help='Task expiration and timeout to get results, the task itself will '
        'have %ds less than the value provided. Default: %%default' %
        TIMEOUT_OVERHEAD)
    group.add_option('-o',
                     '--output-size',
                     type='int',
                     default=100,
                     metavar='N',
                     help='Bytes sent to stdout, default: %default')
    group.add_option(
        '--sleep',
        type='int',
        default=60,
        metavar='N',
        help='Amount of time the bot should sleep, e.g. faking work, '
        'default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = int(round(options.send_rate * options.duration))
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    results = []
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(
                    round(duration * options.send_rate))
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, options.sleep,
                                  options.output_size, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            for i in pool.iter_results():
                results.append(i)
                # This is a bit excessive, but it is useful when some tasks
                # hang, so at least partial data is available.
                if options.dump:
                    results.sort()
                    if os.path.exists(options.dump):
                        os.rename(options.dump, options.dump + '.old')
                    with open(options.dump, 'wb') as f:
                        json.dump(results, f, separators=(',', ':'))
            if not options.dump:
                results.sort()
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    return 0
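The heart of this load generator is the pacing loop: on every iteration it computes how many requests should have been sent by now (elapsed seconds times --send-rate) and triggers just enough tasks to catch up. A standalone illustration of that loop follows; the trigger callback is a made-up stand-in for the pool.add_task() call in the real code.

# Standalone sketch of the send-rate pacing loop; trigger() stands in for the
# pool.add_task() call made in the real code.
import time

def pace(send_rate, total_duration, trigger):
    index = 0
    start = time.time()
    while True:
        duration = time.time() - start
        if duration > total_duration:
            break
        # How many requests should have been issued after `duration` seconds.
        should_have_triggered_so_far = int(round(duration * send_rate))
        while index < should_have_triggered_so_far:
            trigger(index)
            index += 1
        time.sleep(0.01)
    return index

# Roughly 16 items/s for 2 seconds, so about 32 calls.
sent = pace(16., 2., lambda _index: None)
print('Triggered %d requests' % sent)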
Code example #5
def main():
    colorama.init()

    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-I',
                      '--isolate-server',
                      metavar='URL',
                      default='',
                      help='Isolate server to use')
    parser.add_option('--namespace',
                      default='temporary%d-gzip' % time.time(),
                      metavar='XX',
                      help='Namespace to use on the server, default: %default')
    parser.add_option('--threads',
                      type='int',
                      default=16,
                      metavar='N',
                      help='Parallel worker threads to use, default:%default')
    graph.unit_option(parser,
                      '--items',
                      default=0,
                      help='Number of items to upload')
    graph.unit_option(parser,
                      '--max-size',
                      default=0,
                      help='Loop until this amount of data was transferred')
    graph.unit_option(parser,
                      '--mid-size',
                      default=100 * 1024,
                      help='Rough average size of each item, default:%default')
    parser.add_option('--columns',
                      type='int',
                      default=graph.get_console_width(),
                      metavar='N',
                      help='For histogram display, default:%default')
    parser.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('--dry-run',
                      action='store_true',
                      help='Do not send anything')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enable logging')
    options, args = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    if bool(options.max_size) == bool(options.items):
        parser.error(
            'Use one of --max-size or --items.\n'
            '  Use --max-size if you want to run it until NN bytes were '
            'transferred.\n'
            '  Otherwise use --items to run it for NN items.')
    if not options.dry_run:
        options.isolate_server = options.isolate_server.rstrip('/')
        if not options.isolate_server:
            parser.error('--isolate-server is required.')

    print(' - Using %d threads,  items=%d,  max-size=%d,  mid-size=%d' %
          (options.threads, options.items, options.max_size, options.mid_size))
    if options.dry_run:
        print(' - %sDRY RUN MODE%s' %
              (colorama.Fore.GREEN, colorama.Fore.RESET))

    start = time.time()

    random_pool = Randomness()
    print(' - Generated pool after %.1fs' % (time.time() - start))

    columns = [('index', 0), ('data', 0), ('size', options.items)]
    progress = Progress(columns)
    api = isolateserver.get_storage_api(options.isolate_server,
                                        options.namespace)
    do_item = functools.partial(
        send_and_receive, random_pool, options.dry_run,
        isolateserver.is_namespace_with_compression(options.namespace), api,
        progress)

    # TODO(maruel): Handling Ctrl-C should:
    # - Stop adding tasks.
    # - Stop scheduling tasks in ThreadPool.
    # - Wait for the remaining ongoing tasks to complete.
    # - Still print details and write the json file.
    with threading_utils.ThreadPoolWithProgress(progress, options.threads,
                                                options.threads, 0) as pool:
        if options.items:
            for _ in xrange(options.items):
                pool.add_task(0, do_item, gen_size(options.mid_size))
                progress.print_update()
        elif options.max_size:
            # This one is approximate.
            total = 0
            while True:
                size = gen_size(options.mid_size)
                progress.update_item('', size=1)
                progress.print_update()
                pool.add_task(0, do_item, size)
                total += size
                if total >= options.max_size:
                    break
        results = sorted(pool.join())

    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
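One detail worth noting in example #5 is how functools.partial binds every invariant argument of send_and_receive up front, so the pool only ever passes the one value that varies per task (the payload size). A tiny illustration of that binding pattern follows; upload() and its arguments are made-up stand-ins.

# Minimal illustration of the functools.partial binding used above; upload()
# and its arguments are made-up stand-ins for send_and_receive and friends.
import functools

def upload(server, compress, size):
    return '%s compress=%s size=%d' % (server, compress, size)

# Bind the invariant arguments once...
do_item = functools.partial(upload, 'isolate.example.com', True)
# ...then each task only supplies the varying payload size.
print(do_item(4096))
print(do_item(123456))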
Code example #6
def main():
    colorama.init()
    parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
    parser.add_option('-S',
                      '--swarming',
                      metavar='URL',
                      default='',
                      help='Swarming server to use')
    swarming.add_filter_options(parser)
    parser.set_defaults(dimensions=[('os', swarming_load_test_bot.OS_NAME)])

    group = optparse.OptionGroup(parser, 'Load generated')
    group.add_option(
        '-s',
        '--send-rate',
        type='float',
        default=16.,
        metavar='RATE',
        help='Rate (item/s) of sending requests as a float, default: %default')
    group.add_option(
        '-D',
        '--duration',
        type='float',
        default=60.,
        metavar='N',
        help='Duration (s) of the sending phase of the load test, '
        'default: %default')
    group.add_option(
        '-m',
        '--concurrent',
        type='int',
        default=200,
        metavar='N',
        help='Maximum concurrent on-going requests, default: %default')
    group.add_option('-t',
                     '--timeout',
                     type='float',
                     default=3600.,
                     metavar='N',
                     help='Timeout to get results, default: %default')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Display options')
    group.add_option('--columns',
                     type='int',
                     default=graph.get_console_width(),
                     metavar='N',
                     help='For histogram display, default:%default')
    group.add_option(
        '--buckets',
        type='int',
        default=20,
        metavar='N',
        help='Number of buckets for histogram display, default:%default')
    parser.add_option_group(group)

    parser.add_option('--dump', metavar='FOO.JSON', help='Dumps to json file')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='Enables logging')

    options, args = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if options.verbose else logging.FATAL)
    if args:
        parser.error('Unsupported args: %s' % args)
    options.swarming = options.swarming.rstrip('/')
    if not options.swarming:
        parser.error('--swarming is required.')
    if options.duration <= 0:
        parser.error('Needs --duration > 0. 0.01 is a valid value.')
    swarming.process_filter_options(parser, options)

    total = options.send_rate * options.duration
    print(
        'Sending %.1f i/s for %ds with max %d parallel requests; timeout %.1fs; '
        'total %d' % (options.send_rate, options.duration, options.concurrent,
                      options.timeout, total))
    print('[processing/processed/todo]')

    # This is used so there's no clash between runs and actual real usage.
    unique = ''.join(random.choice(string.ascii_letters) for _ in range(8))
    columns = [('processing', 0), ('processed', 0), ('todo', 0)]
    progress = threading_utils.Progress(columns)
    index = 0
    with threading_utils.ThreadPoolWithProgress(progress, 1,
                                                options.concurrent, 0) as pool:
        try:
            start = time.time()
            while True:
                duration = time.time() - start
                if duration > options.duration:
                    break
                should_have_triggered_so_far = int(duration *
                                                   options.send_rate)
                while index < should_have_triggered_so_far:
                    pool.add_task(0, trigger_task, options.swarming,
                                  options.dimensions, progress, unique,
                                  options.timeout, index)
                    progress.update_item('', todo=1)
                    index += 1
                    progress.print_update()
                time.sleep(0.01)
        except KeyboardInterrupt:
            aborted = pool.abort()
            progress.update_item('Got Ctrl-C. Aborted %d unsent tasks.' %
                                 aborted,
                                 raw=True,
                                 todo=-aborted)
            progress.print_update()
        finally:
            # TODO(maruel): We could give up on collecting results for the on-going
            # tasks but that would need to be optional.
            progress.update_item('Getting results for on-going tasks.',
                                 raw=True)
            results = sorted(pool.join())
    progress.print_update()
    # At this point, progress is not used anymore.
    print('')
    print(' - Took %.1fs.' % (time.time() - start))
    print('')
    print_results(results, options.columns, options.buckets)
    if options.dump:
        with open(options.dump, 'w') as f:
            json.dump(results, f, separators=(',', ':'))
    return 0
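Compared to example #4, example #6 moves result collection into a finally clause: even after a Ctrl-C aborts the unsent tasks, pool.join() still waits for work already queued and returns its results. The sketch below reproduces that shutdown shape with a toy pool so it can run on its own; ToyPool is a made-up, single-threaded stand-in for threading_utils.ThreadPoolWithProgress.

# Toy reproduction of the abort-then-join shutdown in example #6; ToyPool is a
# made-up, single-threaded stand-in for threading_utils.ThreadPoolWithProgress.
import time

class ToyPool(object):
    def __init__(self):
        self._queue = []
        self._results = []

    def add_task(self, _priority, func, *args):
        self._queue.append((func, args))

    def abort(self):
        # Drop everything that has not started yet and report how much.
        aborted = len(self._queue)
        self._queue = []
        return aborted

    def join(self):
        # The real pool waits on its worker threads; here queued work simply
        # runs synchronously so the sketch stays self-contained.
        while self._queue:
            func, args = self._queue.pop(0)
            self._results.append(func(*args))
        return self._results

pool = ToyPool()
try:
    for i in range(100):
        pool.add_task(0, lambda x=i: x)
        time.sleep(0.05)  # leaves a window to hit Ctrl-C while "sending"
except KeyboardInterrupt:
    print('Got Ctrl-C. Aborted %d unsent tasks.' % pool.abort())
finally:
    print('Collected %d results.' % len(pool.join()))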