Example #1
          jobs.append(test_job)

  if not jobs:
    print 'No jobs to run.'
    for image in docker_images.itervalues():
      dockerjob.remove_image(image, skip_nonexistent=True)
    sys.exit(1)

  num_failures, resultset = jobset.run(jobs, newline_on_success=True, 
                                       maxjobs=args.jobs)
  if num_failures:
    jobset.message('FAILED', 'Some tests failed', do_newline=True)
  else:
    jobset.message('SUCCESS', 'All tests passed', do_newline=True)

  report_utils.render_xml_report(resultset, 'report.xml')
  
  report_utils.render_html_report(
      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES, 
      resultset, num_failures, args.cloud_to_prod_auth or args.cloud_to_prod)

finally:
  # Check if servers are still running.
  for server, job in server_jobs.iteritems():
    if not job.is_running():
      print 'Server "%s" has exited prematurely.' % server

  dockerjob.finish_jobs([j for j in server_jobs.itervalues()])

  for image in docker_images.itervalues():
    print 'Removing docker image %s' % image
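This snippet appears to be the tail of gRPC's interop test driver: it runs the collected jobs with jobset.run, renders XML and HTML reports, and in the finally block warns about interop servers that exited early before tearing down their Docker containers and images (the snippet is cut off mid-loop; the remove_image call seen earlier in it presumably follows the print). The sketch below re-creates the same run / report / cleanup shape using only the standard library. It is a standalone illustration, not the gRPC jobset or dockerjob API: run_jobs, the sleep-based "jobs", and the fake echo_server process are all stand-ins.

# Standalone sketch of the run / report / cleanup pattern above.
# Assumption: the real code delegates to jobset/dockerjob helpers; here plain
# subprocess and sys stand in so the control flow can run on its own.
import subprocess
import sys

def run_jobs(jobs):
    """Run each command, return (num_failures, {name: returncode})."""
    resultset = {}
    num_failures = 0
    for name, cmd in jobs:
        returncode = subprocess.call(cmd)
        resultset[name] = returncode
        if returncode != 0:
            num_failures += 1
    return num_failures, resultset

def main():
    jobs = [('sleep_ok', [sys.executable, '-c', 'import time; time.sleep(0.1)'])]
    # Long-running "servers" the finally block must always clean up.
    server_jobs = {
        'echo_server': subprocess.Popen(
            [sys.executable, '-c', 'import time; time.sleep(30)']),
    }
    try:
        if not jobs:
            print('No jobs to run.')
            sys.exit(1)
        num_failures, resultset = run_jobs(jobs)
        if num_failures:
            print('FAILED: some tests failed')
        else:
            print('SUCCESS: all tests passed')
        # XML/HTML report rendering would happen here in the original.
    finally:
        # Warn about servers that died on their own, then stop the rest,
        # mirroring the "exited prematurely" check above.
        for server, job in server_jobs.items():
            if job.poll() is not None:
                print('Server "%s" has exited prematurely.' % server)
            else:
                job.terminate()
        for job in server_jobs.values():
            job.wait()

if __name__ == '__main__':
    main()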
Example #2
def _build_and_run(
    check_cancelled, newline_on_success, travis, cache, xml_report=None):
  """Do one pass of building & running tests."""
  # build latest sequentially
  num_failures, _ = jobset.run(
      build_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=travis)
  if num_failures:
    return 1

  # start antagonists
  antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                 for _ in range(0, args.antagonists)]
  port_server_port = 32767
  _start_port_server(port_server_port)
  resultset = None
  try:
    infinite_runs = runs_per_test == 0
    one_run = set(
      spec
      for config in run_configs
      for language in languages
      for spec in language.test_specs(config, args.travis)
      if re.search(args.regex, spec.shortname))
    # When running on Travis, we want our test runs to be as similar as possible
    # for reproducibility purposes.
    if travis:
      massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
    else:
      # whereas otherwise, we want to shuffle things up to give all tests a
      # chance to run.
      massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
      random.shuffle(massaged_one_run)  # which it modifies in-place.
    if infinite_runs:
      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                     else itertools.repeat(massaged_one_run, runs_per_test))
    all_runs = itertools.chain.from_iterable(runs_sequence)

    number_failures, resultset = jobset.run(
        all_runs, check_cancelled, newline_on_success=newline_on_success,
        travis=travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
        stop_on_failure=args.stop_on_failure,
        cache=cache if not xml_report else None,
        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    if resultset:
      for k, v in resultset.iteritems():
        num_runs, num_failures = _calculate_num_runs_failures(v)
        if num_failures == num_runs:  # what about infinite_runs???
          jobset.message('FAILED', k, do_newline=True)
        elif num_failures > 0:
          jobset.message(
              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
              do_newline=True)
        else:
          jobset.message('PASSED', k, do_newline=True)
    if number_failures:
      return 2
  finally:
    for antagonist in antagonists:
      antagonist.kill()
    if xml_report and resultset:
      report_utils.render_xml_report(resultset, xml_report)

  number_failures, _ = jobset.run(
      post_tests_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=travis)
  if number_failures:
    return 3

  if cache: cache.save()

  return 0
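A notable detail of _build_and_run is how it builds the schedule of test runs as a lazily flattened sequence: the selected specs are deduplicated, then either sorted (on Travis, for reproducibility) or shuffled, then repeated runs_per_test times, or forever when runs_per_test == 0, via itertools.repeat plus itertools.chain.from_iterable. The fragment below isolates just that sequencing logic; plain strings stand in for test specs, and the runs_per_test and on_travis parameters are illustrative substitutes for the script's argparse wiring.

# Minimal sketch of the run-sequence construction used above.
import itertools
import random

def build_runs_sequence(specs, runs_per_test, on_travis):
    """Yield test specs in the order _build_and_run would schedule them."""
    infinite_runs = runs_per_test == 0
    if on_travis:
        # Sorted order keeps CI runs comparable between invocations.
        one_run = sorted(set(specs))
    else:
        # Otherwise shuffle so every test gets a chance to run early.
        one_run = list(set(specs))
        random.shuffle(one_run)
    runs_sequence = (itertools.repeat(one_run) if infinite_runs
                     else itertools.repeat(one_run, runs_per_test))
    return itertools.chain.from_iterable(runs_sequence)

# Three repetitions of two specs -> six scheduled runs.
scheduled = list(build_runs_sequence(['c_test', 'py_test'], 3, on_travis=True))
assert scheduled == ['c_test', 'py_test'] * 3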
Example #3
def _build_and_run(check_cancelled,
                   newline_on_success,
                   travis,
                   cache,
                   xml_report=None):
    """Do one pass of building & running tests."""
    # build latest sequentially
    num_failures, _ = jobset.run(build_steps,
                                 maxjobs=1,
                                 stop_on_failure=True,
                                 newline_on_success=newline_on_success,
                                 travis=travis)
    if num_failures:
        return 1

    # start antagonists
    antagonists = [
        subprocess.Popen(['tools/run_tests/antagonist.py'])
        for _ in range(0, args.antagonists)
    ]
    port_server_port = 32767
    _start_port_server(port_server_port)
    resultset = None
    try:
        infinite_runs = runs_per_test == 0
        one_run = set(spec for config in run_configs for language in languages
                      for spec in language.test_specs(config, args.travis)
                      if re.search(args.regex, spec.shortname))
        # When running on Travis, we want our test runs to be as similar as possible
        # for reproducibility purposes.
        if travis:
            massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
        else:
            # whereas otherwise, we want to shuffle things up to give all tests a
            # chance to run.
            massaged_one_run = list(
                one_run)  # random.shuffle needs an indexable seq.
            random.shuffle(massaged_one_run)  # which it modifies in-place.
        if infinite_runs:
            assert len(massaged_one_run
                       ) > 0, 'Must have at least one test for a -n inf run'
        runs_sequence = (itertools.repeat(massaged_one_run)
                         if infinite_runs else itertools.repeat(
                             massaged_one_run, runs_per_test))
        all_runs = itertools.chain.from_iterable(runs_sequence)

        number_failures, resultset = jobset.run(
            all_runs,
            check_cancelled,
            newline_on_success=newline_on_success,
            travis=travis,
            infinite_runs=infinite_runs,
            maxjobs=args.jobs,
            stop_on_failure=args.stop_on_failure,
            cache=cache if not xml_report else None,
            add_env={
                'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port
            })
        if resultset:
            for k, v in resultset.iteritems():
                num_runs, num_failures = _calculate_num_runs_failures(v)
                if num_failures == num_runs:  # what about infinite_runs???
                    jobset.message('FAILED', k, do_newline=True)
                elif num_failures > 0:
                    jobset.message('FLAKE',
                                   '%s [%d/%d runs flaked]' %
                                   (k, num_failures, num_runs),
                                   do_newline=True)
                else:
                    jobset.message('PASSED', k, do_newline=True)
        if number_failures:
            return 2
    finally:
        for antagonist in antagonists:
            antagonist.kill()
        if xml_report and resultset:
            report_utils.render_xml_report(resultset, xml_report)

    number_failures, _ = jobset.run(post_tests_steps,
                                    maxjobs=1,
                                    stop_on_failure=True,
                                    newline_on_success=newline_on_success,
                                    travis=travis)
    if number_failures:
        return 3

    if cache: cache.save()

    return 0
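Example #3 is the same _build_and_run function as Example #2, reformatted with a different line-wrapping style. Its result-classification loop is worth isolating: after jobset.run returns a resultset mapping each test's shortname to its list of per-run results, the loop labels each test FAILED (every run failed), FLAKE (some runs failed), or PASSED. The helper _calculate_num_runs_failures is not shown in the snippet; the sketch below assumes, based on how it is used, that it returns (num_runs, num_failures), and it models run results as plain booleans rather than the real job result objects.

# Sketch of the FAILED / FLAKE / PASSED classification applied to resultset.
# Assumption: results are modeled as booleans (True means the run passed);
# the real _calculate_num_runs_failures inspects richer result objects.

def _calculate_num_runs_failures(results):
    """Return (num_runs, num_failures) for one test's list of run results."""
    num_runs = len(results)
    num_failures = sum(1 for passed in results if not passed)
    return num_runs, num_failures

def classify(resultset):
    """Map each test shortname to 'FAILED', 'FLAKE', or 'PASSED'."""
    verdicts = {}
    for name, results in resultset.items():
        num_runs, num_failures = _calculate_num_runs_failures(results)
        if num_failures == num_runs:
            verdicts[name] = 'FAILED'
        elif num_failures > 0:
            verdicts[name] = 'FLAKE'  # flaked in num_failures of num_runs runs
        else:
            verdicts[name] = 'PASSED'
    return verdicts

assert classify({
    'always_bad': [False, False],
    'flaky': [True, False, True],
    'solid': [True, True],
}) == {'always_bad': 'FAILED', 'flaky': 'FLAKE', 'solid': 'PASSED'}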