Example #1
    if not jobs:
        print 'No jobs to run.'
        for image in docker_images.itervalues():
            dockerjob.remove_image(image, skip_nonexistent=True)
        sys.exit(1)

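    # Run all interop test jobs in parallel, at most args.jobs at a time.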
    num_failures, resultset = jobset.run(jobs,
                                         newline_on_success=True,
                                         maxjobs=args.jobs)
    if num_failures:
        jobset.message('FAILED', 'Some tests failed', do_newline=True)
    else:
        jobset.message('SUCCESS', 'All tests passed', do_newline=True)

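    # Emit the per-test results as a JUnit-style XML report (report.xml).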
    report_utils.render_junit_xml_report(resultset, 'report.xml')

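    # For http2 interop jobs, parse the raw job output into per-test-case results.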
    for name, job in resultset.iteritems():
        if "http2" in name:
            job[0].http2results = aggregate_http2_results(job[0].message)

    report_utils.render_interop_html_report(
        set([str(l) for l in languages]), servers, _TEST_CASES,
        _AUTH_TEST_CASES, _HTTP2_TEST_CASES, resultset, num_failures,
        args.cloud_to_prod_auth or args.cloud_to_prod, args.http2_interop)

finally:
    # Check if servers are still running.
    for server, job in server_jobs.iteritems():
        if not job.is_running():
            print 'Server "%s" has exited prematurely.' % server
Example #2
def _build_and_run(
    check_cancelled, newline_on_success, cache, xml_report=None, build_only=False):
  """Do one pass of building & running tests."""
  # build latest sequentially
  num_failures, resultset = jobset.run(
      build_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=args.travis)
  if num_failures:
    return [BuildAndRunError.BUILD]

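  # In build-only mode, optionally emit the XML report for the build steps and stop here.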
  if build_only:
    if xml_report:
      report_utils.render_junit_xml_report(resultset, xml_report)
    return []

  # start antagonists
  antagonists = [subprocess.Popen(['tools/run_tests/antagonist.py'])
                 for _ in range(0, args.antagonists)]
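  # Start the port server that hands out free ports to test processes; its address
  # is advertised to the tests via GRPC_TEST_PORT_SERVER below.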
  port_server_port = 32767
  _start_port_server(port_server_port)
  resultset = None
  num_test_failures = 0
  try:
    infinite_runs = runs_per_test == 0
    one_run = set(
      spec
      for language in languages
      for spec in language.test_specs()
      if re.search(args.regex, spec.shortname))
    # When running on Travis, we want our test runs to be as similar as possible
    # for reproducibility purposes.
    if args.travis:
      massaged_one_run = sorted(one_run, key=lambda x: x.shortname)
    else:
      # whereas otherwise, we want to shuffle things up to give all tests a
      # chance to run.
      massaged_one_run = list(one_run)  # random.shuffle needs an indexable seq.
      random.shuffle(massaged_one_run)  # which it modifies in-place.
    if infinite_runs:
      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
                     else itertools.repeat(massaged_one_run, runs_per_test))
    all_runs = itertools.chain.from_iterable(runs_sequence)

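    # Run every selected test spec (possibly several times per test) and collect results.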
    num_test_failures, resultset = jobset.run(
        all_runs, check_cancelled, newline_on_success=newline_on_success,
        travis=args.travis, infinite_runs=infinite_runs, maxjobs=args.jobs,
        stop_on_failure=args.stop_on_failure,
        cache=cache if not xml_report else None,
        add_env={'GRPC_TEST_PORT_SERVER': 'localhost:%d' % port_server_port})
    if resultset:
      for k, v in resultset.iteritems():
        num_runs, num_failures = _calculate_num_runs_failures(v)
        if num_failures == num_runs:  # what about infinite_runs???
          jobset.message('FAILED', k, do_newline=True)
        elif num_failures > 0:
          jobset.message(
              'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
              do_newline=True)
        else:
          jobset.message('PASSED', k, do_newline=True)
  finally:
    for antagonist in antagonists:
      antagonist.kill()
    if xml_report and resultset:
      report_utils.render_junit_xml_report(resultset, xml_report)

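  # Run post-test steps sequentially; their failures are reported separately from test failures.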
  number_failures, _ = jobset.run(
      post_tests_steps, maxjobs=1, stop_on_failure=True,
      newline_on_success=newline_on_success, travis=args.travis)

  out = []
  if number_failures:
    out.append(BuildAndRunError.POST_TEST)
  if num_test_failures:
    out.append(BuildAndRunError.TEST)

  if cache: cache.save()

  return out
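
A note on the runs_sequence construction in the function above: itertools.repeat plus
itertools.chain.from_iterable flattens "runs_per_test passes over the test list" into a
single stream of job specs, and runs_per_test == 0 turns it into an infinite stream.
A minimal standalone sketch of the same pattern (the helper name build_runs and the
spec strings are illustrative, not taken from the script):

import itertools

def build_runs(specs, runs_per_test):
    # runs_per_test == 0 means "repeat the whole test list forever".
    batches = (itertools.repeat(specs) if runs_per_test == 0
               else itertools.repeat(specs, runs_per_test))
    # Flatten the repeated lists into one stream of individual runs.
    return itertools.chain.from_iterable(batches)

# Three passes over two specs -> six runs in total.
print(list(build_runs(['spec_a', 'spec_b'], 3)))
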
Example #3
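        # When perf was used and the scenario passed (and flamegraph generation is not
        # skipped), collect a perf profile from each worker in the scenario.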
        if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
            workers_and_base_names = {}
            for worker in scenario.workers:
                if not worker.perf_file_base_name:
                    raise Exception(
                        'using perf but perf report filename is unspecified')
                workers_and_base_names[
                    worker.host_and_port] = worker.perf_file_base_name
            perf_report_failures += run_collect_perf_profile_jobs(
                workers_and_base_names, scenario.name)

# Still write the index.html even if some scenarios failed.
# 'profile_output_files' will only have names for scenarios that passed
if perf_cmd and not args.skip_generate_flamegraphs:
    # write the index file to the output dir, with all profiles from all scenarios/workers
    report_utils.render_perf_profiling_results(
        '%s/index.html' % _PERF_REPORT_OUTPUT_DIR, profile_output_files)

if total_scenario_failures > 0 or qps_workers_killed > 0:
    print('%s scenarios failed and %s qps worker jobs killed' %
          (total_scenario_failures, qps_workers_killed))
    sys.exit(1)

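# Write the merged benchmark results as a JUnit-style XML report.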
report_utils.render_junit_xml_report(merged_resultset,
                                     args.xml_report,
                                     suite_name='benchmarks')
if perf_report_failures > 0:
    print('%s perf profile collection jobs failed' % perf_report_failures)
    sys.exit(1)
Example #4
    if len(relevant_jobs) == len(jobs):
        print '(TESTING) No tests will be skipped.'
    else:
        print '(TESTING) These tests will be skipped:'
        for job in list(set(jobs) - set(relevant_jobs)):
            print '  %s' % job.shortname
    print

if args.dry_run:
    print '--dry_run was used, exiting'
    sys.exit(1)

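# Run every run_tests.py invocation in the matrix, at most args.jobs in parallel.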
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
                                     newline_on_success=True,
                                     travis=True,
                                     maxjobs=args.jobs)
report_utils.render_junit_xml_report(resultset,
                                     'report.xml',
                                     suite_name='aggregate_tests')

if num_failures == 0:
    jobset.message('SUCCESS',
                   'All run_tests.py instances finished successfully.',
                   do_newline=True)
else:
    jobset.message('FAILED',
                   'Some run_tests.py instances have failed.',
                   do_newline=True)
    sys.exit(1)
Example #5
        jobs.append(test_job)

  if not jobs:
    print('No jobs to run.')
    for image in docker_images.itervalues():
      dockerjob.remove_image(image, skip_nonexistent=True)
    sys.exit(1)

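  # Run all interop test jobs in parallel, at most args.jobs at a time.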
  num_failures, resultset = jobset.run(jobs, newline_on_success=True,
                                       maxjobs=args.jobs)
  if num_failures:
    jobset.message('FAILED', 'Some tests failed', do_newline=True)
  else:
    jobset.message('SUCCESS', 'All tests passed', do_newline=True)

  report_utils.render_junit_xml_report(resultset, 'report.xml')

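  # For http2 interop jobs, parse the raw job output into per-test-case results.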
  for name, job in resultset.items():
    if "http2" in name:
      job[0].http2results = aggregate_http2_results(job[0].message)

  report_utils.render_interop_html_report(
      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
      _HTTP2_TEST_CASES, resultset, num_failures,
      args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
      args.http2_interop)

finally:
  # Check if servers are still running.
  for server, job in server_jobs.items():
    if not job.is_running():
      print('Server "%s" has exited prematurely.' % server)
Example #6
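      # Merge this scenario's results into the aggregate resultset used for the final report.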
      merged_resultset = dict(itertools.chain(merged_resultset.iteritems(),
                                              resultset.iteritems()))
    finally:
      # Consider qps workers that need to be killed as failures
      qps_workers_killed += finish_qps_workers(scenario.workers)

    if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
      workers_and_base_names = {}
      for worker in scenario.workers:
        if not worker.perf_file_base_name:
          raise Exception('using perf but perf report filename is unspecified')
        workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
      perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name)


# Still write the index.html even if some scenarios failed.
# 'profile_output_files' will only have names for scenarios that passed
if perf_cmd and not args.skip_generate_flamegraphs:
  # write the index file to the output dir, with all profiles from all scenarios/workers
  report_utils.render_perf_profiling_results('%s/index.html' % _PERF_REPORT_OUTPUT_DIR, profile_output_files)

if total_scenario_failures > 0 or qps_workers_killed > 0:
  print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
  sys.exit(1)

report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
                                     suite_name='benchmarks')
if perf_report_failures > 0:
  print('%s perf profile collection jobs failed' % perf_report_failures)
  sys.exit(1)
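
The merged_resultset assignment at the top of this example combines two result dicts by
chaining their item pairs and rebuilding a dict. A minimal illustration of the same idiom
(the dict contents are made up, and .items() is used instead of the Python 2-only .iteritems()):

import itertools

base = {'scenario_ping_pong': ['passed']}
latest = {'scenario_streaming': ['failed']}
# dict() over the chained (key, value) pairs produces one merged mapping;
# keys present in both inputs take the value from the second dict.
merged = dict(itertools.chain(base.items(), latest.items()))
print(merged)
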
Example #7
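  # Keep only the jobs that are relevant to the changes on top of args.base_branch.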
  relevant_jobs = filter_tests(jobs, args.base_branch)
  # TODO(mattkwong): add skipped tests to report.xml
  print
  if len(relevant_jobs) == len(jobs):
    print '(TESTING) No tests will be skipped.'
  else:
    print '(TESTING) These tests will be skipped:'
    for job in list(set(jobs) - set(relevant_jobs)):
      print '  %s' % job.shortname
  print

if args.dry_run:
  print '--dry_run was used, exiting'
  sys.exit(1)

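# Run the full test matrix and write the aggregated results to report.xml.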
jobset.message('START', 'Running test matrix.', do_newline=True)
num_failures, resultset = jobset.run(jobs,
                                     newline_on_success=True,
                                     travis=True,
                                     maxjobs=args.jobs)
report_utils.render_junit_xml_report(resultset, 'report.xml',
                                     suite_name='aggregate_tests')

if num_failures == 0:
  jobset.message('SUCCESS', 'All run_tests.py instances finished successfully.',
                 do_newline=True)
else:
  jobset.message('FAILED', 'Some run_tests.py instances have failed.',
                 do_newline=True)
  sys.exit(1)