    def testGenerateResultsDict_multipleResults(self):
        result1 = base_test_result.BaseTestResult(
            'test.package.TestName1', base_test_result.ResultType.PASS)
        result2 = base_test_result.BaseTestResult(
            'test.package.TestName2', base_test_result.ResultType.PASS)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result1)
        all_results.AddResult(result2)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEqual(['test.package.TestName1', 'test.package.TestName2'],
                         results_dict['all_tests'])

        self.assertIn('per_iteration_data', results_dict)
        iterations = results_dict['per_iteration_data']
        self.assertEqual(1, len(iterations))

        expected_tests = set([
            'test.package.TestName1',
            'test.package.TestName2',
        ])

        for test_name, iteration_result in six.iteritems(iterations[0]):
            self.assertIn(test_name, expected_tests)
            expected_tests.remove(test_name)
            self.assertEqual(1, len(iteration_result))

            test_iteration_result = iteration_result[0]
            self.assertIn('status', test_iteration_result)
            self.assertEqual('SUCCESS', test_iteration_result['status'])
Example #2
    def testGenerateResultsDict_globalTags(self):
        raw_results = []
        global_tags = ['UNRELIABLE_RESULTS']

        results_dict = json_results.GenerateResultsDict(
            [raw_results], global_tags=global_tags)
        self.assertEqual(['UNRELIABLE_RESULTS'], results_dict['global_tags'])
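
Taken together, these tests pin down the shape of the dictionary that
GenerateResultsDict returns. A minimal sketch of that shape, inferred only
from the keys asserted on this page (real results may carry more fields):

    results_dict = {
        'global_tags': ['UNRELIABLE_RESULTS'],
        'all_tests': ['test.package.TestName'],
        # One entry per iteration; each maps a test name to the list of
        # attempts (retries) made for that test within the iteration.
        'per_iteration_data': [{
            'test.package.TestName': [{
                'status': 'SUCCESS',        # or 'FAILURE'
                'elapsed_time_ms': 123,
                'losless_snippet': True,    # (sic) spelling matches the key
                'output_snippet': 'blah-blah',
                'output_snippet_base64': '',
            }],
        }],
    }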
Example #3
    def post_run(self, return_code):
        # If we don't need to parse the host-side Tast tool's results, fall back to
        # the parent method's default behavior.
        if self._llvm_profile_var:
            return super(TastTest, self).post_run(return_code)

        tast_results_path = os.path.join(self._logs_dir,
                                         'streamed_results.jsonl')
        if not os.path.exists(tast_results_path):
            logging.error(
                'Tast results not found at %s. Falling back to generic result '
                'reporting.', tast_results_path)
            return super(TastTest, self).post_run(return_code)

        # See the link below for the format of the results:
        # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
        with jsonlines.open(tast_results_path) as reader:
            tast_results = collections.deque(reader)

        suite_results = base_test_result.TestRunResults()
        for test in tast_results:
            errors = test['errors']
            start, end = test['start'], test['end']
            # Use dateutil to parse the timestamps since datetime can't handle
            # nanosecond precision.
            duration = dateutil.parser.parse(end) - dateutil.parser.parse(
                start)
            duration_ms = duration.total_seconds() * 1000
            if test['skipReason']:
                result = base_test_result.ResultType.SKIP
            elif errors:
                result = base_test_result.ResultType.FAIL
            else:
                result = base_test_result.ResultType.PASS
            error_log = ''
            if errors:
                # See the link below for the format of these errors:
                # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
                for err in errors:
                    error_log += str(err['stack']) + '\n'
            base_result = base_test_result.BaseTestResult(test['name'],
                                                          result,
                                                          duration=duration_ms,
                                                          log=error_log)
            suite_results.AddResult(base_result)

        if self._test_launcher_summary_output:
            with open(self._test_launcher_summary_output, 'w') as f:
                json.dump(json_results.GenerateResultsDict([suite_results]), f)

        if not suite_results.DidRunPass():
            return 1
        elif return_code:
            logging.warning(
                'No failed tests found, but exit code of %d was returned from '
                'cros_run_test.', return_code)
            return return_code
        return 0
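
For reference, a hypothetical streamed_results.jsonl line showing just the
fields this parser reads (the TestResult godoc linked above is authoritative;
the values here are made up):

    {"name": "example.Fail", "start": "2020-01-01T00:00:00.000000001Z",
     "end": "2020-01-01T00:00:01.500000001Z", "skipReason": "",
     "errors": [{"stack": "example.go:12: something failed"}]}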
Example #4
 def post_run(self, return_code):
   # Create a simple json results file for a test run. The results will contain
   # only one test (suite_name), and will either be a PASS or FAIL depending on
   # return_code.
   if self._test_launcher_summary_output:
     result = (base_test_result.ResultType.FAIL if return_code else
                   base_test_result.ResultType.PASS)
     suite_result = base_test_result.BaseTestResult(self.suite_name, result)
     run_results = base_test_result.TestRunResults()
     run_results.AddResult(suite_result)
     with open(self._test_launcher_summary_output, 'w') as f:
       json.dump(json_results.GenerateResultsDict([run_results]), f)
Example #5
 def post_run(self, return_code):
   # Create a simple json results file for the sanity test if needed. The
   # results will contain only one test (SANITY_TEST_TARGET), and will
   # either be a PASS or FAIL depending on the return code of cros_run_vm_test.
   if self._test_launcher_summary_output:
     result = (base_test_result.ResultType.FAIL if return_code else
                   base_test_result.ResultType.PASS)
     sanity_test_result = base_test_result.BaseTestResult(
         SANITY_TEST_TARGET, result)
     run_results = base_test_result.TestRunResults()
     run_results.AddResult(sanity_test_result)
     with open(self._test_launcher_summary_output, 'w') as f:
       json.dump(json_results.GenerateResultsDict([run_results]), f)
Example #6
    def testGenerateResultsDict_failedResult(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
        self.assertEqual(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertIn('test.package.TestName', iteration_result)
        self.assertEqual(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertIn('status', test_iteration_result)
        self.assertEqual('FAILURE', test_iteration_result['status'])
Example #7
    def testGenerateResultsDict_duration(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName',
            base_test_result.ResultType.PASS,
            duration=123)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
        self.assertEqual(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertIn('test.package.TestName', iteration_result)
        self.assertEqual(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertIn('elapsed_time_ms', test_iteration_result)
        self.assertEqual(123, test_iteration_result['elapsed_time_ms'])
Example #8
    def testGenerateResultsDict_passOnRetry(self):
        raw_results = []

        result1 = base_test_result.BaseTestResult(
            'test.package.TestName1', base_test_result.ResultType.FAIL)
        run_results1 = base_test_result.TestRunResults()
        run_results1.AddResult(result1)
        raw_results.append(run_results1)

        result2 = base_test_result.BaseTestResult(
            'test.package.TestName1', base_test_result.ResultType.PASS)
        run_results2 = base_test_result.TestRunResults()
        run_results2.AddResult(result2)
        raw_results.append(run_results2)

        results_dict = json_results.GenerateResultsDict([raw_results])
        self.assertEqual(['test.package.TestName1'],
                         results_dict['all_tests'])

        # Check that there's only one iteration.
        self.assertIn('per_iteration_data', results_dict)
        iterations = results_dict['per_iteration_data']
        self.assertEqual(1, len(iterations))

        # Check that test.package.TestName1 is the only test in the iteration.
        self.assertEqual(1, len(iterations[0]))
        self.assertIn('test.package.TestName1', iterations[0])

        # Check that there are two results for test.package.TestName1.
        actual_test_results = iterations[0]['test.package.TestName1']
        self.assertEqual(2, len(actual_test_results))

        # Check that the first result is a failure.
        self.assertIn('status', actual_test_results[0])
        self.assertEqual('FAILURE', actual_test_results[0]['status'])

        # Check that the second result is a success.
        self.assertIn('status', actual_test_results[1])
        self.assertEqual('SUCCESS', actual_test_results[1]['status'])
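
Note the nesting that this test exercises: the list passed to
GenerateResultsDict holds iterations, and an element may itself be a list of
TestRunResults whose entries become successive attempts within that one
iteration. A sketch of the two call shapes:

    # One iteration containing two attempts for the same test, as asserted
    # above (per_iteration_data has length 1, with two results inside):
    json_results.GenerateResultsDict([[run_results1, run_results2]])
    # Passing the runs as separate top-level elements would presumably yield
    # two iterations with one attempt each:
    json_results.GenerateResultsDict([run_results1, run_results2])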
Example #9
    def testGenerateResultsDict_loslessSnippet(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)
        log = 'blah-blah'
        result.SetLog(log)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEqual(['test.package.TestName'], results_dict['all_tests'])
        self.assertEqual(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertIn('test.package.TestName', iteration_result)
        self.assertEqual(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertIn('losless_snippet', test_iteration_result)
        self.assertTrue(test_iteration_result['losless_snippet'])
        self.assertIn('output_snippet', test_iteration_result)
        self.assertEqual(log, test_iteration_result['output_snippet'])
        self.assertIn('output_snippet_base64', test_iteration_result)
        self.assertEqual('', test_iteration_result['output_snippet_base64'])
Example #10
def vm_test(args):
    is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

    # To keep things easy for us, ensure both types of output locations are
    # the same.
    if args.test_launcher_summary_output and args.vm_logs_dir:
        json_output_dir = os.path.dirname(
            args.test_launcher_summary_output) or '.'
        if os.path.abspath(json_output_dir) != os.path.abspath(
                args.vm_logs_dir):
            logging.error(
                '--test-launcher-summary-output and --vm-logs-dir must point to '
                'the same directory.')
            return 1

    cros_run_vm_test_cmd = [
        CROS_RUN_VM_TEST_PATH,
        '--start',
        '--board',
        args.board,
        '--cache-dir',
        args.cros_cache,
    ]

    # cros_run_vm_test has trouble with relative paths that go up directories, so
    # cd to src/, which should be the root of all data deps.
    os.chdir(CHROMIUM_SRC_PATH)

    runtime_files = read_runtime_files(args.runtime_deps_path,
                                       args.path_to_outdir)
    if args.vpython_dir:
        # --vpython-dir is relative to the out dir, but --files expects paths
        # relative to src dir, so fix the path up a bit.
        runtime_files.append(
            os.path.relpath(
                os.path.abspath(
                    os.path.join(args.path_to_outdir, args.vpython_dir)),
                CHROMIUM_SRC_PATH))
        runtime_files.append('.vpython')

    # If we're pushing files, we need to set the cwd.
    if runtime_files:
        cros_run_vm_test_cmd.extend(
            ['--cwd',
             os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
    for f in runtime_files:
        cros_run_vm_test_cmd.extend(['--files', f])

    if args.vm_logs_dir:
        cros_run_vm_test_cmd += [
            '--results-src',
            '/var/log/',
            '--results-dest-dir',
            args.vm_logs_dir,
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        result_dir, result_file = os.path.split(
            args.test_launcher_summary_output)
        # If args.test_launcher_summary_output is a file in cwd, result_dir will be
        # an empty string, so replace it with '.' when this is the case so
        # cros_run_vm_test can correctly handle it.
        if not result_dir:
            result_dir = '.'
        vm_result_file = '/tmp/%s' % result_file
        cros_run_vm_test_cmd += [
            '--results-src',
            vm_result_file,
            '--results-dest-dir',
            result_dir,
        ]

    if is_sanity_test:
        # cros_run_vm_test's default behavior when no cmd is specified is the
        # sanity test that's baked into the VM image. This test smoke-checks
        # the system browser, so deploy our locally-built chrome to the VM
        # before testing.
        cros_run_vm_test_cmd += [
            '--deploy',
            '--build-dir',
            os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
        ]
    else:
        pre_test_cmds = [
            # /home is mounted with "noexec" in the VM, but some of our tools
            # and tests use the home dir as a workspace (e.g., vpython
            # downloads python binaries to ~/.vpython-root). /tmp doesn't have
            # this restriction, so change the location of the home dir for the
            # duration of the test.
            'export HOME=/tmp',
            '\\;',
        ]
        if args.vpython_dir:
            vpython_spec_path = os.path.relpath(
                os.path.join(CHROMIUM_SRC_PATH, '.vpython'),
                args.path_to_outdir)
            pre_test_cmds += [
                # Backslash is needed to prevent $PATH from being expanded
                # prematurely on the host.
                r'export PATH=\$PATH:\$PWD/%s' % args.vpython_dir,
                '\\;',
                # Initialize the vpython cache. This can take 10-20s, and some
                # tests can't afford to wait that long on the first invocation.
                'vpython',
                '-vpython-spec',
                vpython_spec_path,
                '-vpython-tool',
                'install',
                '\\;',
            ]
        cros_run_vm_test_cmd += [
            '--cmd',
            '--',
            # Wrap the cmd to run in the VM in quotes (") so that the
            # interpreter on the host doesn't stop at any ";" or "&&" tokens
            # in the cmd.
            '"',
        ] + pre_test_cmds + [
            './' + args.test_exe,
            '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
            '--test-launcher-total-shards=%d' %
            args.test_launcher_total_shards,
            '"',
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        cros_run_vm_test_cmd += [
            '--test-launcher-summary-output=%s' % vm_result_file,
        ]

    logging.info('Running the following command:')
    logging.info(' '.join(cros_run_vm_test_cmd))

    # deploy_chrome needs a set of GN args used to build chrome to determine if
    # certain libraries need to be pushed to the VM. It looks for the args via an
    # env var. To trigger the default deploying behavior, give it a dummy set of
    # args.
    # TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd-line
    # args.
    env_copy = os.environ.copy()
    if not env_copy.get('GN_ARGS'):
        env_copy['GN_ARGS'] = 'is_chromeos = true'
    env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(
        CHROMITE_PATH, 'bin')
    rc = subprocess.call(cros_run_vm_test_cmd,
                         stdout=sys.stdout,
                         stderr=sys.stderr,
                         env=env_copy)

    # Create a simple json results file for the sanity test if needed. The results
    # will contain only one test ('cros_vm_sanity_test'), and will either be a
    # PASS or FAIL depending on the return code of cros_run_vm_test above.
    if args.test_launcher_summary_output and is_sanity_test:
        result = (base_test_result.ResultType.FAIL
                  if rc else base_test_result.ResultType.PASS)
        sanity_test_result = base_test_result.BaseTestResult(
            'cros_vm_sanity_test', result)
        run_results = base_test_result.TestRunResults()
        run_results.AddResult(sanity_test_result)
        with open(args.test_launcher_summary_output, 'w') as f:
            json.dump(json_results.GenerateResultsDict([run_results]), f)

    return rc
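
To make the quoting concrete, this is roughly what the logged command looks
like for the non-sanity branch (board, paths, and shard values below are
hypothetical, and the file list is abridged):

    cros_run_vm_test --start --board amd64-generic --cache-dir .cros_cache
        --cwd out/Release --files ... --cmd -- " export HOME=/tmp \;
        ./base_unittests --test-launcher-shard-index=0
        --test-launcher-total-shards=1 "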
Example #11
    def post_run(self, return_code):
        # If we don't need to parse the host-side Tast tool's results, fall back to
        # the parent method's default behavior.
        if self._llvm_profile_var:
            return super(TastTest, self).post_run(return_code)

        tast_results_path = os.path.join(self._logs_dir,
                                         'streamed_results.jsonl')
        if not os.path.exists(tast_results_path):
            logging.error(
                'Tast results not found at %s. Falling back to generic result '
                'reporting.', tast_results_path)
            return super(TastTest, self).post_run(return_code)

        # See the link below for the format of the results:
        # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
        with jsonlines.open(tast_results_path) as reader:
            tast_results = collections.deque(reader)

        suite_results = base_test_result.TestRunResults()
        for test in tast_results:
            errors = test['errors']
            start, end = test['start'], test['end']
            # Use dateutil to parse the timestamps since datetime can't handle
            # nanosecond precision.
            duration = dateutil.parser.parse(end) - dateutil.parser.parse(
                start)
            duration_ms = duration.total_seconds() * 1000
            if test['skipReason']:
                result = base_test_result.ResultType.SKIP
            elif errors:
                result = base_test_result.ResultType.FAIL
            else:
                result = base_test_result.ResultType.PASS
            error_log = ''
            if errors:
                # See the link below for the format of these errors:
                # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/tast/testing#Error
                for err in errors:
                    error_log += str(err['stack']) + '\n'
            error_log += (
                "\nIf you're unsure why this test failed, consult the steps "
                'outlined in\n%s\n' % TAST_DEBUG_DOC)
            base_result = base_test_result.BaseTestResult(test['name'],
                                                          result,
                                                          duration=duration_ms,
                                                          log=error_log)
            suite_results.AddResult(base_result)
            self._maybe_handle_perf_results(test['name'])

            if self._rdb_client:
                # Walk the contents of the test's "outDir" and attach any file
                # found inside as an RDB 'artifact'. (This could include
                # system logs, screenshots, etc.)
                artifacts = self.get_artifacts(test['outDir'])
                self._rdb_client.Post(test['name'], result, duration_ms,
                                      error_log, artifacts)

        if self._rdb_client and self._logs_dir:
            # Attach artifacts from the device that don't apply to a single test.
            artifacts = self.get_artifacts(
                os.path.join(self._logs_dir, 'system_logs'))
            artifacts.update(
                self.get_artifacts(os.path.join(self._logs_dir, 'crashes')))
            self._rdb_client.ReportInvocationLevelArtifacts(artifacts)

        if self._test_launcher_summary_output:
            with open(self._test_launcher_summary_output, 'w') as f:
                json.dump(json_results.GenerateResultsDict([suite_results]), f)

        if not suite_results.DidRunPass():
            return 1
        elif return_code:
            logging.warning(
                'No failed tests found, but exit code of %d was returned from '
                'cros_run_test.', return_code)
            return return_code
        return 0
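
get_artifacts itself is not shown on this page, but the dict.update call above
implies it returns a mapping. A plausible sketch, assuming artifact names are
paths relative to the searched directory mapped to absolute file paths (both
the shape and this implementation are assumptions):

    import os

    def get_artifacts(self, dir_path):
        # Map each file found under dir_path (recursively) to its absolute
        # path, keyed by its path relative to dir_path.
        artifacts = {}
        for root, _, files in os.walk(dir_path):
            for name in files:
                full_path = os.path.join(root, name)
                artifacts[os.path.relpath(full_path, dir_path)] = full_path
        return artifacts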
Example #12
def vm_test(args):
  is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

  cros_run_vm_test_cmd = [
      CROS_RUN_VM_TEST_PATH,
      '--start',
      '--board', args.board,
      '--cache-dir', args.cros_cache,
  ]

  # cros_run_vm_test has trouble with relative paths that go up directories, so
  # cd to src/, which should be the root of all data deps.
  os.chdir(CHROMIUM_SRC_PATH)

  runtime_files = read_runtime_files(
      args.runtime_deps_path, args.path_to_outdir)
  # If we're pushing files, we need to set the cwd.
  if runtime_files:
    cros_run_vm_test_cmd.extend(
        ['--cwd', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
  for f in runtime_files:
    cros_run_vm_test_cmd.extend(['--files', f])

  if args.test_launcher_summary_output and not is_sanity_test:
    result_dir, result_file = os.path.split(args.test_launcher_summary_output)
    # If args.test_launcher_summary_output is a file in cwd, result_dir will be
    # an empty string, so replace it with '.' when this is the case so
    # cros_run_vm_test can correctly handle it.
    if not result_dir:
      result_dir = '.'
    vm_result_file = '/tmp/%s' % result_file
    cros_run_vm_test_cmd += [
      '--results-src', vm_result_file,
      '--results-dest-dir', result_dir,
    ]

  if is_sanity_test:
    # cros_run_vm_test's default behavior when no cmd is specified is the
    # sanity test that's baked into the VM image. This test smoke-checks the
    # system browser, so deploy our locally-built chrome to the VM before
    # testing.
    cros_run_vm_test_cmd += [
        '--deploy',
        '--build-dir', os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
    ]
  else:
    cros_run_vm_test_cmd += [
        '--cmd',
        '--',
        './' + args.test_exe,
        '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
        '--test-launcher-total-shards=%d' % args.test_launcher_total_shards,
    ]

  if args.test_launcher_summary_output and not is_sanity_test:
    cros_run_vm_test_cmd += [
      '--test-launcher-summary-output=%s' % vm_result_file,
    ]

  logging.info('Running the following command:')
  logging.info(' '.join(cros_run_vm_test_cmd))

  # deploy_chrome needs a set of GN args used to build chrome to determine if
  # certain libraries need to be pushed to the VM. It looks for the args via an
  # env var. To trigger the default deploying behavior, give it a dummy set of
  # args.
  # TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd-line
  # args.
  env_copy = os.environ.copy()
  if not env_copy.get('GN_ARGS'):
    env_copy['GN_ARGS'] = 'is_chromeos = true'
  env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(CHROMITE_PATH, 'bin')
  rc = subprocess.call(
      cros_run_vm_test_cmd, stdout=sys.stdout, stderr=sys.stderr, env=env_copy)

  # Create a simple json results file for the sanity test if needed. The results
  # will contain only one test ('cros_vm_sanity_test'), and will either be a
  # PASS or FAIL depending on the return code of cros_run_vm_test above.
  if args.test_launcher_summary_output and is_sanity_test:
    result = (base_test_result.ResultType.FAIL if rc else
                  base_test_result.ResultType.PASS)
    sanity_test_result = base_test_result.BaseTestResult(
        'cros_vm_sanity_test', result)
    run_results = base_test_result.TestRunResults()
    run_results.AddResult(sanity_test_result)
    with open(args.test_launcher_summary_output, 'w') as f:
      json.dump(json_results.GenerateResultsDict([run_results]), f)

  return rc
Example #13
    def post_run(self, return_code):
        tast_results_path = os.path.join(self._logs_dir,
                                         'streamed_results.jsonl')
        if not os.path.exists(tast_results_path):
            logging.error(
                'Tast results not found at %s. Falling back to generic result '
                'reporting.', tast_results_path)
            return super().post_run(return_code)

        # See the link below for the format of the results:
        # https://godoc.org/chromium.googlesource.com/chromiumos/platform/tast.git/src/chromiumos/cmd/tast/run#TestResult
        with jsonlines.open(tast_results_path) as reader:
            tast_results = collections.deque(reader)

        suite_results = base_test_result.TestRunResults()
        for test in tast_results:
            errors = test['errors']
            start, end = test['start'], test['end']
            # Use dateutil to parse the timestamps since datetime can't handle
            # nanosecond precision.
            duration = dateutil.parser.parse(end) - dateutil.parser.parse(
                start)
            # If the duration is negative, Tast has likely reported an incorrect
            # duration. See https://issuetracker.google.com/issues/187973541. Round
            # up to 0 in that case to avoid confusing RDB.
            duration_ms = max(duration.total_seconds() * 1000, 0)
            if test['skipReason']:
                result = base_test_result.ResultType.SKIP
            elif errors:
                result = base_test_result.ResultType.FAIL
            else:
                result = base_test_result.ResultType.PASS
            primary_error_message = None
            error_log = ''
            if errors:
                # See the link below for the format of these errors:
                # https://source.chromium.org/chromiumos/chromiumos/codesearch/+/main:src/platform/tast/src/chromiumos/tast/cmd/tast/internal/run/resultsjson/resultsjson.go
                primary_error_message = errors[0]['reason']
                for err in errors:
                    error_log += err['stack'] + '\n'
            debug_link = (
                "If you're unsure why this test failed, consult the steps "
                'outlined <a href="%s">here</a>.' % TAST_DEBUG_DOC)
            base_result = base_test_result.BaseTestResult(test['name'],
                                                          result,
                                                          duration=duration_ms,
                                                          log=error_log)
            suite_results.AddResult(base_result)
            self._maybe_handle_perf_results(test['name'])

            if self._rdb_client:
                # Walk the contents of the test's "outDir" and attach any file
                # found inside as an RDB 'artifact'. (This could include
                # system logs, screenshots, etc.)
                artifacts = self.get_artifacts(test['outDir'])
                self._rdb_client.Post(test['name'],
                                      result,
                                      duration_ms,
                                      error_log,
                                      None,
                                      artifacts=artifacts,
                                      failure_reason=primary_error_message,
                                      html_artifact=debug_link)

        if self._rdb_client and self._logs_dir:
            # Attach artifacts from the device that don't apply to a single test.
            artifacts = self.get_artifacts(
                os.path.join(self._logs_dir, 'system_logs'))
            artifacts.update(
                self.get_artifacts(os.path.join(self._logs_dir, 'crashes')))
            self._rdb_client.ReportInvocationLevelArtifacts(artifacts)

        if self._test_launcher_summary_output:
            with open(self._test_launcher_summary_output, 'w') as f:
                json.dump(json_results.GenerateResultsDict([suite_results]), f)

        if not suite_results.DidRunPass():
            return 1
        if return_code:
            logging.warning(
                'No failed tests found, but exit code of %d was returned from '
                'cros_run_test.', return_code)
            return return_code
        return 0
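
The dateutil comment above is easy to check: the timestamps carry nanosecond
precision, which dateutil accepts and truncates to microseconds. A quick
sketch (timestamp values are made up):

    import dateutil.parser

    start = dateutil.parser.parse('2021-05-18T12:00:00.000000001Z')
    end = dateutil.parser.parse('2021-05-18T12:00:01.500000001Z')
    duration_ms = max((end - start).total_seconds() * 1000, 0)  # 1500.0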
Example #14
def vm_test(args, unknown_args):
    is_sanity_test = args.test_exe == 'cros_vm_sanity_test'

    # To keep things easy for us, ensure both types of output locations are
    # the same.
    if args.test_launcher_summary_output and args.vm_logs_dir:
        json_output_dir = os.path.dirname(
            args.test_launcher_summary_output) or '.'
        if os.path.abspath(json_output_dir) != os.path.abspath(
                args.vm_logs_dir):
            logging.error(
                '--test-launcher-summary-output and --vm-logs-dir must point to '
                'the same directory.')
            return 1

    cros_run_vm_test_cmd = [
        CROS_RUN_VM_TEST_PATH,
        '--start',
        '--board',
        args.board,
        '--cache-dir',
        args.cros_cache,
    ]

    # cros_run_vm_test has trouble with relative paths that go up directories, so
    # cd to src/, which should be the root of all data deps.
    os.chdir(CHROMIUM_SRC_PATH)

    runtime_files = read_runtime_files(args.runtime_deps_path,
                                       args.path_to_outdir)
    if args.vpython_dir:
        # --vpython-dir is relative to the out dir, but --files expects paths
        # relative to src dir, so fix the path up a bit.
        runtime_files.append(
            os.path.relpath(
                os.path.abspath(
                    os.path.join(args.path_to_outdir, args.vpython_dir)),
                CHROMIUM_SRC_PATH))
        runtime_files.append('.vpython')

    # If we're pushing files, we need to set the cwd.
    if runtime_files:
        cros_run_vm_test_cmd.extend(
            ['--cwd',
             os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH)])
    for f in runtime_files:
        cros_run_vm_test_cmd.extend(['--files', f])

    if args.vm_logs_dir:
        cros_run_vm_test_cmd += [
            '--results-src',
            '/var/log/',
            '--results-dest-dir',
            args.vm_logs_dir,
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        result_dir, result_file = os.path.split(
            args.test_launcher_summary_output)
        # If args.test_launcher_summary_output is a file in cwd, result_dir will be
        # an empty string, so replace it with '.' when this is the case so
        # cros_run_vm_test can correctly handle it.
        if not result_dir:
            result_dir = '.'
        vm_result_file = '/tmp/%s' % result_file
        cros_run_vm_test_cmd += [
            '--results-src',
            vm_result_file,
            '--results-dest-dir',
            result_dir,
        ]

    if is_sanity_test:
        # cros_run_vm_test's default behavior when no cmd is specified is the
        # sanity test that's baked into the VM image. This test smoke-checks
        # the system browser, so deploy our locally-built chrome to the VM
        # before testing.
        cros_run_vm_test_cmd += [
            '--deploy',
            '--build-dir',
            os.path.relpath(args.path_to_outdir, CHROMIUM_SRC_PATH),
        ]
    else:
        pre_test_cmds = [
            # /home is mounted with "noexec" in the VM, but some of our tools
            # and tests use the home dir as a workspace (e.g., vpython
            # downloads python binaries to ~/.vpython-root). /tmp doesn't have
            # this restriction, so change the location of the home dir for the
            # duration of the test.
            'export HOME=/tmp',
            '\\;',
        ]
        if args.vpython_dir:
            vpython_spec_path = os.path.relpath(
                os.path.join(CHROMIUM_SRC_PATH, '.vpython'),
                args.path_to_outdir)
            pre_test_cmds += [
                # Backslash is needed to prevent $PATH from being expanded
                # prematurely on the host.
                r'export PATH=\$PATH:\$PWD/%s' % args.vpython_dir,
                '\\;',
                # Initialize the vpython cache. This can take 10-20s, and some
                # tests can't afford to wait that long on the first invocation.
                'vpython',
                '-vpython-spec',
                vpython_spec_path,
                '-vpython-tool',
                'install',
                '\\;',
            ]
        cros_run_vm_test_cmd += [
            # Some tests fail as root, so run as the less privileged user 'chronos'.
            '--as-chronos',
            '--cmd',
            '--',
            # Wrap the cmd to run in the VM in quotes (") so that the
            # interpreter on the host doesn't stop at any ";" or "&&" tokens
            # in the cmd.
            '"',
        ] + pre_test_cmds + [
            './' + args.test_exe,
            '--test-launcher-shard-index=%d' % args.test_launcher_shard_index,
            '--test-launcher-total-shards=%d' %
            args.test_launcher_total_shards,
        ] + unknown_args + [
            '"',
        ]

    if args.test_launcher_summary_output and not is_sanity_test:
        cros_run_vm_test_cmd += [
            '--test-launcher-summary-output=%s' % vm_result_file,
        ]

    logging.info('Running the following command:')
    logging.info(' '.join(cros_run_vm_test_cmd))

    # deploy_chrome needs a set of GN args used to build chrome to determine if
    # certain libraries need to be pushed to the VM. It looks for the args via an
    # env var. To trigger the default deploying behavior, give it a dummy set of
    # args.
    # TODO(crbug.com/823996): Make the GN-dependent deps controllable via cmd-line
    # args.
    env_copy = os.environ.copy()
    if not env_copy.get('GN_ARGS'):
        env_copy['GN_ARGS'] = 'is_chromeos = true'
    env_copy['PATH'] = env_copy['PATH'] + ':' + os.path.join(
        CHROMITE_PATH, 'bin')

    # Trap SIGTERM and kill all child processes of cros_run_vm_test when it's
    # caught. This will allow us to capture logs from the VM if a test hangs
    # and gets timeout-killed by swarming. See also:
    # https://chromium.googlesource.com/infra/luci/luci-py/+/master/appengine/swarming/doc/Bot.md#graceful-termination_aka-the-sigterm-and-sigkill-dance
    test_proc = None

    def _kill_child_procs(trapped_signal, _):
        logging.warning('Received signal %d. Killing child processes of test.',
                        trapped_signal)
        if not test_proc or not test_proc.pid:
            # This shouldn't happen?
            logging.error('Test process not running.')
            return
        for child in psutil.Process(test_proc.pid).children():
            logging.warning('Killing process %s', child)
            child.kill()

    # Standard GTests should handle retries and timeouts themselves.
    retries, timeout = 0, None
    if is_sanity_test:
        # 5 min should be enough time for the sanity test to pass.
        retries, timeout = 2, 300
    signal.signal(signal.SIGTERM, _kill_child_procs)

    for i in range(retries + 1):
        logging.info('########################################')
        logging.info('Test attempt #%d', i)
        logging.info('########################################')
        test_proc = subprocess42.Popen(cros_run_vm_test_cmd,
                                       stdout=sys.stdout,
                                       stderr=sys.stderr,
                                       env=env_copy)
        try:
            test_proc.wait(timeout=timeout)
        except subprocess42.TimeoutExpired:
            logging.error('Test timed out. Sending SIGTERM.')
            # SIGTERM the proc and wait 10s for it to close.
            test_proc.terminate()
            try:
                test_proc.wait(timeout=10)
            except subprocess42.TimeoutExpired:
                # If it hasn't closed in 10s, SIGKILL it.
                logging.error('Test did not exit in time. Sending SIGKILL.')
                test_proc.kill()
                test_proc.wait()
        logging.info('Test exited with %d.', test_proc.returncode)
        if test_proc.returncode == 0:
            break

    rc = test_proc.returncode

    # Create a simple json results file for the sanity test if needed. The results
    # will contain only one test ('cros_vm_sanity_test'), and will either be a
    # PASS or FAIL depending on the return code of cros_run_vm_test above.
    if args.test_launcher_summary_output and is_sanity_test:
        result = (base_test_result.ResultType.FAIL
                  if rc else base_test_result.ResultType.PASS)
        sanity_test_result = base_test_result.BaseTestResult(
            'cros_vm_sanity_test', result)
        run_results = base_test_result.TestRunResults()
        run_results.AddResult(sanity_test_result)
        with open(args.test_launcher_summary_output, 'w') as f:
            json.dump(json_results.GenerateResultsDict([run_results]), f)

    return rc
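
As a closing usage note, the summary files written by these helpers can be
read back with plain json. A minimal sketch (the path is hypothetical):

    import json

    with open('output.json') as f:
        results = json.load(f)
    for test_name, attempts in results['per_iteration_data'][0].items():
        # Each attempt carries at least a 'status' of 'SUCCESS' or 'FAILURE',
        # per the unit tests at the top of this page.
        print(test_name, [a['status'] for a in attempts])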