Example #1
0
def run_serial(swarming_server, isolate_server, priority, deadline, repeat,
               isolated_hash, name, bots):
    """Runs the task one at a time, once per bot, |repeat| times.

    This will be mainly bound by task scheduling latency, especially if the
    bots are busy and the priority is low.

    Returns:
      The highest exit code returned by any 'swarming.py run' invocation,
      0 when every run succeeded.
    """
    result = 0
    # range (not xrange) so this works on Python 3 too, consistent with the
    # other helpers in this file.
    for i in range(repeat):
        # The suffix only depends on the iteration, so compute it once per i.
        suffix = '/%d' % i if repeat > 1 else ''
        for bot in bots:
            task_name = parallel_execution.task_to_name(
                name, {'id': bot}, isolated_hash) + suffix
            cmd = [
                sys.executable,
                'swarming.py',
                'run',
                '--swarming',
                swarming_server,
                '--isolate-server',
                isolate_server,
                '--priority',
                priority,
                '--deadline',
                deadline,
                '--dimension',
                'id',
                bot,
                '--task-name',
                task_name,
                isolated_hash,
            ]
            r = subprocess.call(cmd, cwd=ROOT_DIR)
            # Keep the worst (highest) exit code seen so far.
            result = max(r, result)
    return result
Example #2
0
def run_serial(
    swarming_server, isolate_server, priority, deadline, repeat, isolated_hash,
    name, bots):
  """Runs the task one at a time, once per bot, |repeat| times.

  This will be mainly bound by task scheduling latency, especially if the bots
  are busy and the priority is low.

  Returns:
    The highest exit code of all the 'swarming.py run' child processes, 0 on
    full success.
  """
  result = 0
  # range (not xrange) so the code also runs under Python 3, matching the
  # other helpers in this file.
  for i in range(repeat):
    # The suffix only depends on the iteration; compute it once per i.
    suffix = '/%d' % i if repeat > 1 else ''
    for bot in bots:
      task_name = parallel_execution.task_to_name(
          name, {'id': bot}, isolated_hash) + suffix
      cmd = [
        sys.executable, 'swarming.py', 'run',
        '--swarming', swarming_server,
        '--isolate-server', isolate_server,
        '--priority', priority,
        '--deadline', deadline,
        '--dimension', 'id', bot,
        '--task-name', task_name,
        isolated_hash,
      ]
      r = subprocess.call(cmd, cwd=ROOT_DIR)
      # Keep the worst (highest) exit code seen so far.
      result = max(r, result)
  return result
def run_swarming_tests_on_swarming(
    swarming_server, isolate_server, priority, oses, tests, logs,
    no_idempotent):
  """Archives, triggers swarming jobs and gets results.

  Returns 0 when every task succeeded, 1 when at least one failed.
  """
  print('Archiving the whole tree.')
  start = time.time()
  tree_isolated = archive_tree(isolate_server)

  # Create and archive all the .isolated files.
  isolateds = archive_isolated_triggers(isolate_server, tree_isolated, tests)
  print('Archival took %3.2fs' % (time.time() - start))

  # Expand to one (test, os) pair per swarming task.
  exploded = []
  for test_path, isolated_hash in isolateds:
    logging.debug('%s: %s', test_path, isolated_hash)
    test_name = os.path.basename(test_path).split('.')[0]
    exploded.extend(
        (test_name, platform, isolated_hash) for platform in oses)

  tasks = []
  for name, platform, isolated_hash in exploded:
    task_name = parallel_execution.task_to_name(
        name, {'os': platform}, isolated_hash)
    tasks.append((task_name, isolated_hash, {'os': platform}))

  extra_args = ['--hard-timeout', '180']
  if not no_idempotent:
    extra_args.append('--idempotent')
  if priority:
    extra_args += ['--priority', str(priority)]
    print('Using priority %s' % priority)

  result = 0
  failures = parallel_execution.run_swarming_tasks_parallel(
      swarming_server, isolate_server, extra_args, tasks)
  for test_name, dimensions, stdout in failures:
    if logs:
      # Write each log as it is retrieved.
      if not os.path.isdir(logs):
        os.makedirs(logs)
      log_name = '%s_%s.log' % (dimensions['os'], test_name.split('/', 1)[0])
      with open(os.path.join(logs, log_name), 'wb') as f:
        f.write(stdout)
    result = 1
  return result
Example #4
0
def run_swarming_tests_on_swarming(swarming_server, isolate_server, priority,
                                   oses, tests, logs, no_idempotent):
    """Archives, triggers swarming jobs and gets results.

    Returns 0 when every task succeeded, 1 when at least one failed.
    """
    print('Archiving the whole tree.')
    start = time.time()
    tree_isolated = archive_tree(isolate_server)

    # Create and archive all the .isolated files.
    isolateds = archive_isolated_triggers(isolate_server, tree_isolated, tests)
    print('Archival took %3.2fs' % (time.time() - start))

    # Expand to one (test, os) pair per swarming task.
    exploded = []
    for test_path, isolated_hash in isolateds:
        logging.debug('%s: %s', test_path, isolated_hash)
        test_name = os.path.basename(test_path).split('.')[0]
        for platform in oses:
            exploded.append((test_name, platform, isolated_hash))

    tasks = []
    for name, platform, isolated_hash in exploded:
        task_name = parallel_execution.task_to_name(
            name, {'os': platform}, isolated_hash)
        tasks.append((task_name, isolated_hash, {'os': platform}))

    extra_args = ['--hard-timeout', '180']
    if not no_idempotent:
        extra_args.append('--idempotent')
    if priority:
        extra_args += ['--priority', str(priority)]
        print('Using priority %s' % priority)

    result = 0
    failures = parallel_execution.run_swarming_tasks_parallel(
        swarming_server, isolate_server, extra_args, tasks)
    for test_name, dimensions, stdout in failures:
        if logs:
            # Write each log as it is retrieved.
            if not os.path.isdir(logs):
                os.makedirs(logs)
            log_name = '%s_%s.log' % (
                dimensions['os'], test_name.split('/', 1)[0])
            with open(os.path.join(logs, log_name), 'wb') as f:
                f.write(stdout)
        result = 1
    return result
def run_batches(swarming_server, isolate_server, dimensions, caches, tags, env,
                priority, deadline, batches, repeat, isolated_hash, name, bots,
                args):
  """Runs the task |batches| at a time.

  This will be mainly bound by task scheduling latency, especially if the bots
  are busy and the priority is low.
  """
  # The semaphore caps how many 'swarming.py run' children run concurrently.
  sem = threading.Semaphore(batches)
  running = []
  for iteration in range(repeat):
    # The suffix only depends on the iteration, not on the bot.
    suffix = '/%d' % iteration if repeat > 1 else ''
    for bot in bots:
      task_name = parallel_execution.task_to_name(
          name, {'id': bot}, isolated_hash) + suffix
      cmd = [
          sys.executable, 'swarming.py', 'run',
          '--swarming', swarming_server,
          '--isolate-server', isolate_server,
          '--priority', priority,
          '--deadline', deadline,
          '--dimension', 'id', bot,
          '--task-name', task_name,
          '-s', isolated_hash,
      ]
      for key, value in sorted(dimensions.items()):
        cmd += ['-d', key, value]
      for key, value in sorted(caches):
        cmd += ['--named-cache', key, value]
      for tag in sorted(tags):
        cmd += ['--tags', tag]
      for key, value in env:
        cmd += ['--env', key, value]
      if args:
        cmd += ['--'] + list(args)
      running.append(batched_subprocess(cmd, sem))
  # Wait for every triggered child to complete.
  for child in running:
    child.join()
def run_serial(swarming_server, isolate_server, dimensions, caches, tags, env,
               priority, deadline, repeat, isolated_hash, name, bots, args):
  """Runs the task one at a time.

  This will be mainly bound by task scheduling latency, especially if the bots
  are busy and the priority is low.

  Returns the highest exit code of all the 'swarming.py run' children, 0 on
  full success.
  """
  result = 0
  for iteration in range(repeat):
    # The suffix only depends on the iteration, not on the bot.
    suffix = '/%d' % iteration if repeat > 1 else ''
    for bot in bots:
      task_name = parallel_execution.task_to_name(
          name, {'id': bot}, isolated_hash) + suffix
      cmd = [
          sys.executable, 'swarming.py', 'run',
          '--swarming', swarming_server,
          '--isolate-server', isolate_server,
          '--priority', priority,
          '--deadline', deadline,
          '--dimension', 'id', bot,
          '--task-name', task_name,
          '-s', isolated_hash,
      ]
      for key, value in sorted(dimensions.items()):
        cmd += ['-d', key, value]
      for key, value in sorted(caches):
        cmd += ['--named-cache', key, value]
      for tag in sorted(tags):
        cmd += ['--tags', tag]
      for key, value in env:
        cmd += ['--env', key, value]
      if args:
        cmd += ['--'] + list(args)
      exit_code = subprocess.call(cmd, cwd=CLIENT_DIR)
      # Keep the worst (highest) exit code seen so far.
      if exit_code > result:
        result = exit_code
  return result
def run_parallel(swarming_server, isolate_server, dimensions, caches, env,
                 priority, deadline, repeat, isolated_hash, name, bots, args):
  """Triggers one task per bot (repeated |repeat| times) in parallel and
  prints the output of every failed task.
  """
  tasks = []
  for iteration in range(repeat):
    suffix = '/%d' % iteration if repeat > 1 else ''
    for bot in bots:
      # Name the task from the bot id alone, then merge in the extra
      # dimensions for the actual trigger.
      task_dims = {'id': bot}
      task_name = parallel_execution.task_to_name(
          name, task_dims, isolated_hash) + suffix
      task_dims.update(dimensions)
      tasks.append((task_name, isolated_hash, task_dims, caches, env))
  extra_args = ['--priority', priority, '--deadline', deadline] + list(args)
  print('Using priority %s' % priority)
  failures = parallel_execution.run_swarming_tasks_parallel(
      swarming_server, isolate_server, extra_args, tasks)
  for _name, failed_dims, stdout in failures:
    print('%sFailure: %s%s\n%s' % (
      colorama.Fore.RED, failed_dims, colorama.Fore.RESET, stdout))
Example #8
0
def run_parallel(swarming_server, isolate_server, priority, deadline, repeat,
                 isolated_hash, name, bots):
    """Triggers one task per bot (repeated |repeat| times) in parallel and
    prints the output of every failed task.
    """
    tasks = []
    # range (not xrange) so this also runs under Python 3, consistent with
    # the other helpers in this file.
    for i in range(repeat):
        suffix = '/%d' % i if repeat > 1 else ''
        tasks.extend((
            parallel_execution.task_to_name(name, {'id': bot}, isolated_hash) +
            suffix,
            isolated_hash,
            {
                'id': bot
            },
        ) for bot in bots)
    extra_args = ['--priority', priority, '--deadline', deadline]
    print('Using priority %s' % priority)
    for failed_task in parallel_execution.run_swarming_tasks_parallel(
            swarming_server, isolate_server, extra_args, tasks):
        _name, dimensions, stdout = failed_task
        print('%sFailure: %s%s\n%s' %
              (colorama.Fore.RED, dimensions, colorama.Fore.RESET, stdout))
Example #9
0
def run_parallel(
    swarming_server, isolate_server, priority, deadline, repeat, isolated_hash,
    name, bots):
  """Triggers one task per bot (repeated |repeat| times) in parallel and
  prints the output of every failed task.
  """
  tasks = []
  # range (not xrange) so this also runs under Python 3, consistent with the
  # other helpers in this file.
  for i in range(repeat):
    suffix = '/%d' % i if repeat > 1 else ''
    tasks.extend(
        (
          parallel_execution.task_to_name(
              name, {'id': bot}, isolated_hash) + suffix,
          isolated_hash,
          {'id': bot},
        ) for bot in bots)
  extra_args = ['--priority', priority, '--deadline', deadline]
  print('Using priority %s' % priority)
  for failed_task in parallel_execution.run_swarming_tasks_parallel(
      swarming_server, isolate_server, extra_args, tasks):
    _name, dimensions, stdout = failed_task
    print('%sFailure: %s%s\n%s' % (
      colorama.Fore.RED, dimensions, colorama.Fore.RESET, stdout))