Example #1
 def test_message_with_tuple(self):
     error = ScriptError('', ('my', 'command'), -1, 'My output.',
                         '/Users/username/blah')
     self.assertEqual(
         error.message_with_output(),
         'Failed to run "(\'my\', \'command\')" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.'
     )
Example #2
    def run_command(
            self,
            args,
            cwd=None,
            env=None,
            input=None,  # pylint: disable=redefined-builtin
            timeout_seconds=None,
            error_handler=None,
            return_exit_code=False,
            return_stderr=True,
            ignore_stderr=False,
            decode_output=True,
            debug_logging=True):
        self._append_call(args, cwd=cwd, input=input, env=env)

        assert isinstance(args, list) or isinstance(args, tuple)

        if self._should_log:
            env_string = ''
            if env:
                env_string = ', env=%s' % env
            input_string = ''
            if input:
                input_string = ', input=%s' % input
            _log.info('MOCK run_command: %s, cwd=%s%s%s', args, cwd,
                      env_string, input_string)

        if self._exception:
            raise self._exception  # pylint: disable=raising-bad-type
        if self._should_throw:
            raise ScriptError('MOCK ScriptError',
                              output=self._output,
                              exit_code=self._exit_code)

        if self._run_command_fn:
            return self._run_command_fn(args)

        if return_exit_code:
            return self._exit_code

        if self._exit_code and error_handler:
            script_error = ScriptError(
                script_args=args,
                exit_code=self._exit_code,
                output=self._output)
            error_handler(script_error)

        output = self._output
        if return_stderr:
            output += self._stderr
        if decode_output and not isinstance(output, six.text_type):
            output = output.decode('utf-8')

        return output
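The error_handler hook above (consulted when _exit_code is non-zero) lets a caller decide what to do with a failure instead of letting the ScriptError propagate. A minimal sketch of such a handler, relying only on the ScriptError attributes exercised elsewhere on this page; the function name and logging setup are illustrative, not part of the mock:

import logging

_log = logging.getLogger(__name__)

def log_and_continue(script_error):
    # Downgrade the failure to a warning; ScriptError carries the exit code
    # and the captured output, as the tests on this page demonstrate.
    _log.warning('Command failed with exit code %s: %s',
                 script_error.exit_code, script_error.message_with_output())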
Example #3
    def update_expectations(self):
        """Downloads text new baselines and adds test expectations lines.

        Returns:
            A pair: A set of tests that are rebaselined, and a dictionary
            mapping tests that couldn't be rebaselined to lists of expectation
            lines written to TestExpectations.
        """
        issue_number = self.get_issue_number()
        if issue_number == 'None':
            raise ScriptError('No issue on current branch.')

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            raise ScriptError('No try job information was collected.')

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.configs_with_all_pass.extend(
                    self.get_builder_configs(build))
                continue
            result_dicts = self.get_failing_results_dicts(build)
            for result_dict in result_dicts:
                test_expectations = self.merge_dicts(
                    test_expectations, result_dict)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         config1: SimpleTestResult,
        #         config2: SimpleTestResult,
        #         config3: AnotherSimpleTestResult
        #     }
        # }
        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         (config1, config2): SimpleTestResult,
        #         (config3,): AnotherSimpleTestResult
        #     }
        # }

        rebaselined_tests, test_expectations = self.download_text_baselines(
            test_expectations)
        exp_lines_dict = self.write_to_test_expectations(test_expectations)
        return rebaselined_tests, exp_lines_dict
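The comments above show the shape of test_expectations before and after merge_same_valued_keys. A standalone sketch of that merging step, assuming results compare equal with ==; this is an illustration, not necessarily the method's actual implementation:

def merge_same_valued_keys(result_by_config):
    # Collapse configs that map to equal results into a single tuple key.
    merged = {}
    for config, result in result_by_config.items():
        match = next((key for key, value in merged.items() if value == result), None)
        if match is None:
            merged[(config,)] = result
        else:
            merged[match + (config,)] = merged.pop(match)
    return merged

# {'config1': 'FAIL', 'config2': 'FAIL', 'config3': 'TIMEOUT'}
#     -> {('config1', 'config2'): 'FAIL', ('config3',): 'TIMEOUT'}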
Example #4
    def generate_failing_results_dict(self, build, web_test_results):
        """Makes a dict with results for one platform.

        Args:
            build: Build object containing builder information.
            web_test_results: A list of WebTestResult objects.

        Returns:
            A dictionary with the structure: {
                'test-name': {
                    ('full-port-name',): SimpleTestResult
                }
            }
        """
        test_dict = {}
        configs = self.get_builder_configs(build, web_test_results)
        _log.debug(
            'Getting failing results dictionary for %s step in latest %s build',
            web_test_results.step_name(), build.builder_name)

        if len(configs) > 1:
            raise ScriptError('More than one config was produced for'
                              ' the builder and web test step combination')
        if not configs:
            raise ScriptError('No configuration was found for the builder and'
                              ' web test step combination')
        config = configs[0]
        for result in web_test_results.didnt_run_as_expected_results():
            # TODO(rmhasan): If a test fails unexpectedly, it is rerun until it
            # passes or a retry limit is reached. Even if it eventually passes,
            # there are still flaky failures that we are not creating test
            # expectations for. Maybe we should add a mode which creates
            # expectations for tests that are flaky but still pass in a web
            # test step.

            # Create flaky expectations for flaky tests on Android. In order to
            # do this we should add 'Pass' to all tests with failing
            # expectations that pass in the patchset's try job.
            if result.did_pass() and not self.options.include_unexpected_pass:
                continue

            test_name = result.test_name()
            if not self._is_wpt_test(test_name):
                continue
            test_dict[test_name] = {
                config:
                SimpleTestResult(
                    expected=result.expected_results(),
                    actual=result.actual_results(),
                    bug=self.UMBRELLA_BUG)
            }
        return test_dict
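SimpleTestResult is not defined in this excerpt. Given the keyword arguments used above (expected, actual, bug), a namedtuple along these lines is a plausible sketch of its shape (an assumption, not the actual definition):

from collections import namedtuple

# Immutable record of one test's expected results, actual results, and the bug to annotate.
SimpleTestResult = namedtuple('SimpleTestResult', ['expected', 'actual', 'bug'])

# e.g. SimpleTestResult(expected='PASS', actual='FAIL', bug='crbug.com/12345')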
Example #5
 def _remote_branch_ref(self):
     # Use references so that we can avoid collisions, e.g. we don't want to operate on refs/heads/trunk if it exists.
     remote_main_ref = 'refs/remotes/origin/main'
     if self._branch_ref_exists(remote_main_ref):
         return remote_main_ref
     error_msg = "Can't find a branch to diff against. %s does not exist" % remote_main_ref
     raise ScriptError(message=error_msg)
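_branch_ref_exists is not shown here. A standalone sketch of such a check, assuming it shells out to git (the real method presumably goes through the class's own command runner): `git show-ref --verify --quiet <ref>` exits non-zero when the ref does not exist.

import subprocess

def branch_ref_exists(ref):
    # e.g. branch_ref_exists('refs/remotes/origin/main')
    return subprocess.call(['git', 'show-ref', '--verify', '--quiet', ref]) == 0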
Example #6
    def update_expectations(self):
        """Downloads text new baselines and adds test expectations lines.

        Returns:
            A pair: A set of tests that are rebaselined, and a dictionary
            mapping tests that couldn't be rebaselined to lists of expectation
            lines written to TestExpectations.
        """
        issue_number = self.get_issue_number()
        if issue_number == 'None':
            raise ScriptError('No issue on current branch.')

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            raise ScriptError('No try job information was collected.')

        # The manifest may be used below to check which tests are reference tests.
        WPTManifest.ensure_manifest(self.host)

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if job_status.result == 'SUCCESS':
                self.ports_with_all_pass.add(self.port_name(build))
            port_results = self.get_failing_results_dict(build)
            test_expectations = self.merge_dicts(test_expectations,
                                                 port_results)

        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         ('port-name1', 'port-name2'): SimpleTestResult,
        #         'port-name3': SimpleTestResult
        #     }
        # }

        rebaselined_tests, test_expectations = self.download_text_baselines(
            test_expectations)
        test_expectation_lines = self.create_line_dict(test_expectations)
        self.write_to_test_expectations(test_expectation_lines)
        return rebaselined_tests, test_expectation_lines
Example #7
 def process_cmds(cmd_args):
     if 'token' in cmd_args:
         return '00000'
     elif (('weblayer_shell_wpt on '
            'Ubuntu-16.04 or Ubuntu-18.04') in cmd_args):
         return json.dumps(actual_mp)
     elif (('chrome_public_wpt on '
            'Ubuntu-16.04 or Ubuntu-18.04') in cmd_args):
         raise ScriptError('Test Error')
     elif 'chrome_public_wpt' in cmd_args:
         return json.dumps(baseline_mp)
     else:
         return '{"number": 400, "id":"abcd"}'
Example #8
    def _merge_base(self, git_commit=None):
        if git_commit:
            # Rewrite UPSTREAM to the upstream branch
            if 'UPSTREAM' in git_commit:
                upstream = self._upstream_branch()
                if not upstream:
                    raise ScriptError(
                        message='No upstream/tracking branch set.')
                git_commit = git_commit.replace('UPSTREAM', upstream)

            # Special-case <refname>.. to include working copy changes, e.g., 'HEAD....' shows only the diffs from HEAD.
            if git_commit.endswith('....'):
                return git_commit[:-4]

            if '..' not in git_commit:
                git_commit = git_commit + '^..' + git_commit
            return git_commit

        return self._remote_merge_base()
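A standalone restatement of the range rules above, for illustration (the real method also consults _upstream_branch() and falls back to _remote_merge_base() when no commit is given):

def resolve_diff_range(git_commit, upstream='origin/main'):
    if 'UPSTREAM' in git_commit:
        git_commit = git_commit.replace('UPSTREAM', upstream)
    if git_commit.endswith('....'):
        return git_commit[:-4]                  # 'HEAD....' -> 'HEAD'
    if '..' not in git_commit:
        return git_commit + '^..' + git_commit  # 'abc123' -> 'abc123^..abc123'
    return git_commit                           # 'UPSTREAM..HEAD' -> 'origin/main..HEAD'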
Example #9
 def test_message_with_output(self):
     error = ScriptError('My custom message!', '', -1)
     self.assertEqual(error.message_with_output(), 'My custom message!')
     error = ScriptError('My custom message!', '', -1, 'My output.')
     self.assertEqual(error.message_with_output(),
                      'My custom message!\n\noutput: My output.')
     error = ScriptError('', 'my_command!', -1, 'My output.',
                         '/Users/username/blah')
     self.assertEqual(
         error.message_with_output(),
         'Failed to run "\'my_command!\'" exit_code: -1 cwd: /Users/username/blah\n\noutput: My output.'
     )
     error = ScriptError('', 'my_command!', -1, 'ab' + '1' * 499)
     self.assertEqual(
         error.message_with_output(),
         'Failed to run "\'my_command!\'" exit_code: -1\n\noutput: Last 500 characters of output:\nb'
         + '1' * 499)
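The two tests above pin down the message format: when no message is given, one is built from repr(script_args), the exit code, and (if present) the cwd; output longer than 500 characters is truncated to its last 500. A minimal sketch consistent with these tests, assuming positional arguments (message, script_args, exit_code, output, cwd); the real class has more to it:

class ScriptErrorSketch(Exception):
    def __init__(self, message='', script_args=None, exit_code=None,
                 output=None, cwd=None):
        if not message:
            # repr() gives "('my', 'command')" for a tuple and "'my_command!'" for a string,
            # matching the quoted forms asserted in the tests above.
            message = 'Failed to run "%r" exit_code: %d' % (script_args, exit_code)
            if cwd:
                message += ' cwd: %s' % cwd
        super(ScriptErrorSketch, self).__init__(message)
        self.message = message
        self.output = output

    def message_with_output(self, output_limit=500):
        if not self.output:
            return self.message
        output = self.output
        if len(output) > output_limit:
            output = ('Last %d characters of output:\n%s' %
                      (output_limit, output[-output_limit:]))
        return '%s\n\noutput: %s' % (self.message, output)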
Example #10
 def run_command(_):
     raise ScriptError('Unable to infer commit position from footers rutabaga')
Example #11
 def _get_marker_line_number(self, path):
     for line in self._test_expectations.get_updated_lines(path):
         if line.to_string() == self.MARKER_COMMENT:
             return line.lineno
     raise ScriptError('Marker comment does not exist in %s' % path)
Example #12
    def update_expectations(self):
        """Downloads text new baselines and adds test expectations lines.

        Returns:
            A pair: A set of tests that are rebaselined, and a dictionary
            mapping tests that couldn't be rebaselined to lists of expectation
            lines written to TestExpectations.
        """
        # The wpt_manifest function in Port is cached by default, but may be out
        # of date if this code is called during test import. An out of date
        # manifest will cause us to mistreat newly added tests, as they will not
        # exist in the cached manifest. To avoid this, we invalidate the cache
        # here. See https://crbug.com/1154650 .
        self.port.wpt_manifest.cache_clear()

        issue_number = self.get_issue_number()
        if issue_number == 'None':
            raise ScriptError('No issue on current branch.')

        build_to_status = self.get_latest_try_jobs()
        _log.debug('Latest try jobs: %r', build_to_status)
        if not build_to_status:
            raise ScriptError('No try job information was collected.')

        # Here we build up a dict of failing test results for all platforms.
        test_expectations = {}
        for build, job_status in build_to_status.iteritems():
            if (job_status.result == 'SUCCESS' and
                    not self.options.include_unexpected_pass):
                continue
            # Temporary logging for https://crbug.com/1154650
            result_dicts = self.get_failing_results_dicts(build)
            _log.info('Merging failing results dicts for %s', build)
            for result_dict in result_dicts:
                test_expectations = self.merge_dicts(
                    test_expectations, result_dict)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         config1: SimpleTestResult,
        #         config2: SimpleTestResult,
        #         config3: AnotherSimpleTestResult
        #     }
        # }
        # And then we merge results for different platforms that had the same results.
        for test_name, platform_result in test_expectations.iteritems():
            # platform_result is a dict mapping platforms to results.
            test_expectations[test_name] = self.merge_same_valued_keys(
                platform_result)

        # At this point, test_expectations looks like: {
        #     'test-with-failing-result': {
        #         (config1, config2): SimpleTestResult,
        #         (config3,): AnotherSimpleTestResult
        #     }
        # }

        rebaselined_tests, test_expectations = self.download_text_baselines(
            test_expectations)
        exp_lines_dict = self.write_to_test_expectations(test_expectations)
        return rebaselined_tests, exp_lines_dict
Example #13
 def _get_marker_line_number(test_expectations, path, marker_comment):
     for line in test_expectations.get_updated_lines(path):
         if line.to_string() == marker_comment:
             return line.lineno
     raise ScriptError('Marker comment does not exist in %s' % path)
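A dependency-free variant of the two marker helpers above, operating on raw lines instead of a TestExpectations object (a sketch for illustration; ScriptError stands for the same class used throughout these examples):

def find_marker_line_number(lines, marker_comment):
    # Line numbers are 1-based, matching line.lineno in the helpers above.
    for lineno, line in enumerate(lines, start=1):
        if line.strip() == marker_comment:
            return lineno
    raise ScriptError('Marker comment does not exist in the given lines')

# find_marker_line_number(['# Existing lines', '# === marker ==='],
#                         '# === marker ===') == 2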
Example #14
 def mock_run_command(args):
     port.host.filesystem.write_binary_file(args[4], mock_image_diff)
     raise ScriptError(exit_code=1)
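Exercising the mock above directly (the argument layout, with args[4] as the diff-output path, is implied by the mock itself; the command line is otherwise illustrative): the caller sees a ScriptError with exit code 1 and can still read the diff image the mock wrote to the mock filesystem.

try:
    mock_run_command(['image_diff', '--out', 'expected.png', 'actual.png', '/tmp/diff.png'])
except ScriptError as error:
    assert error.exit_code == 1
assert port.host.filesystem.read_binary_file('/tmp/diff.png') == mock_image_diff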
Example #15
 def run(self, args):
     self.calls.append(['git', 'cl'] + args)
     arg_key = "".join(args)
     if self._git_error_output and arg_key in self._git_error_output.keys():
         raise ScriptError(output=self._git_error_output[arg_key])
     return 'mock output'
Example #16
 def _run_fn(args):
     if args[0] == 'git' and args[1] == 'apply':
         raise ScriptError('MOCK failed applying patch')
     return ''
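A quick self-check of the hook above, and the reason it exists: a mock executive like the one in Example #2 calls self._run_command_fn(args) when one is supplied, so installing _run_fn there makes every `git apply` fail while all other commands succeed silently.

try:
    _run_fn(['git', 'apply', 'patch.diff'])
    raise AssertionError('expected a ScriptError')
except ScriptError as error:
    assert error.message_with_output() == 'MOCK failed applying patch'
assert _run_fn(['git', 'status']) == ''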