Example #1
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        do_coverage = True
        base_path = cast(str, env.destination_directory)
        rc_path = os.path.join(base_path, 'continuous-integration',
                               '.coveragerc')
        target_path = base_path
        result = shell_tools.run_cmd(env.bin('pytest'),
                                     target_path,
                                     None if verbose else '--quiet',
                                     *([
                                         '--cov', '--cov-report=annotate',
                                         '--cov-config={}'.format(rc_path)
                                     ] if do_coverage else []),
                                     out=shell_tools.TeeCapture(sys.stdout),
                                     raise_on_fail=False,
                                     log_run_to_stderr=verbose)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Tests passed!'

        # Parse the failure count from pytest's final summary line,
        # e.g. '=== 3 failed, 10 passed in 2.10s ==='.
        last_line = [e for e in output.split('\n') if e.strip()][-1]
        fail_match = re.match(r'.+=== (\d+) failed', last_line)
        if fail_match is None:
            return False, 'Tests failed.'
        return False, '{} tests failed.'.format(fail_match.group(1))
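
The summary-line parsing is worth isolating. Below is a minimal sketch run
against a hypothetical pytest footer; the exact banner format varies across
pytest versions, so treat the sample string as an assumption:

import re

def describe_failures(output: str) -> str:
    last_line = [e for e in output.split('\n') if e.strip()][-1]
    fail_match = re.match(r'.+=== (\d+) failed', last_line)
    if fail_match is None:
        return 'Tests failed.'
    return '{} tests failed.'.format(fail_match.group(1))

# Sample footer in the assumed pytest format.
footer = '====== 3 failed, 42 passed in 1.20s ======\n'
print(describe_failures(footer))  # -> 3 tests failed.
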
Example #2
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        base_path = cast(str, env.destination_directory)
        rc_path = os.path.join(base_path,
                               'continuous-integration',
                               '.pylintrc')
        files = list(
            env_tools.get_unhidden_ungenerated_python_files(base_path))

        result = shell_tools.run_cmd(
            env.bin('pylint'),
            '--rcfile={}'.format(rc_path),
            *files,
            out=shell_tools.TeeCapture(sys.stdout),
            raise_on_fail=False,
            log_run_to_stderr=verbose,
            abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'No lint here!'
        file_line_count = len(re.findall(r'\*' * 10, output))
        total_line_count = len([e for e in output.split('\n') if e.strip()])
        issue_count = total_line_count - file_line_count

        return False, '{} issues'.format(issue_count)
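
The issue count is a line-count heuristic: pylint prints one asterisk banner
line ('************* Module ...') per offending module, so subtracting the
banner count from the non-blank line count approximates the number of
messages. A small demonstration on illustrative pylint output:

import re

def estimate_issue_count(output: str) -> int:
    # One asterisk banner line per module with findings.
    file_line_count = len(re.findall(r'\*' * 10, output))
    total_line_count = len([e for e in output.split('\n') if e.strip()])
    return total_line_count - file_line_count

sample = ('************* Module foo\n'
          'foo.py:1:0: C0114: Missing module docstring\n'
          'foo.py:3:0: W0611: Unused import os (unused-import)\n')
print(estimate_issue_count(sample))  # -> 2
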
Example #3
    def _common_run_helper(env: env_tools.PreparedEnv, coverage: bool,
                           verbose: bool) -> Tuple[bool, str]:
        base_path = cast(str, env.destination_directory)
        target_path = base_path
        result = shell_tools.run_cmd(
            env.bin('pytest'),
            target_path,
            None if verbose else '--quiet',
            '--cov' if coverage else '',
            '--cov-report=annotate' if coverage else '',
            out=shell_tools.TeeCapture(sys.stdout),
            raise_on_fail=False,
            log_run_to_stderr=verbose,
            abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Converted tests passed!'

        last_line = [e for e in output.split('\n') if e.strip()][-1]
        fail_match = re.match(r'.+=== (\d+) failed', last_line)
        if fail_match is None:
            return False, 'Tests failed.'
        return False, '{} tests failed.'.format(fail_match.group(1))
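
Every example indexes result[0] for the captured output and result[2] for the
exit status, which suggests run_cmd returns a (stdout, stderr, return code)
tuple. Here is a stand-in with that assumed shape, built on subprocess; the
real shell_tools.run_cmd also tees output and accepts the keyword options
seen above, none of which is modeled here:

import subprocess
from typing import Optional, Tuple

def run_cmd_stub(*cmd: str) -> Tuple[Optional[str], Optional[str], int]:
    # Assumed return shape: (captured stdout, captured stderr, return code).
    completed = subprocess.run(cmd, capture_output=True, text=True)
    return completed.stdout, completed.stderr, completed.returncode

result = run_cmd_stub('python', '--version')
output, passed = result[0], result[2] == 0
print(passed, output.strip())
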
Example #4
    def _common_run_helper(env: env_tools.PreparedEnv,
                           coverage: bool,
                           verbose: bool) -> Tuple[bool, str]:
        base_path = cast(str, env.destination_directory)
        target_path = base_path
        result = shell_tools.run_cmd(
            env.bin('pytest'),
            target_path,
            None if verbose else '--quiet',
            '--cov' if coverage else '',
            '--cov-report=annotate' if coverage else '',
            out=shell_tools.TeeCapture(sys.stdout),
            raise_on_fail=False,
            log_run_to_stderr=verbose,
            abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Converted tests passed!'

        last_line = [e for e in output.split('\n') if e.strip()][-1]
        fail_match = re.match(r'.+=== (\d+) failed', last_line)
        if fail_match is None:
            return False, 'Tests failed.'
        return False, '{} tests failed.'.format(fail_match.group(1))
Example #5
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        base_path = cast(str, env.destination_directory)
        rc_path = os.path.join(base_path, 'continuous-integration',
                               '.pylintrc')
        files = list(
            env_tools.get_unhidden_ungenerated_python_files(base_path))

        result = shell_tools.run_cmd(env.bin('pylint'),
                                     '--reports=no',
                                     '--score=no',
                                     '--output-format=colorized',
                                     '--rcfile={}'.format(rc_path),
                                     *files,
                                     out=shell_tools.TeeCapture(sys.stdout),
                                     raise_on_fail=False,
                                     log_run_to_stderr=verbose,
                                     abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'No lint here!'
        file_line_count = len(re.findall(r'\*' * 10, output))
        total_line_count = len([e for e in output.split('\n') if e.strip()])
        issue_count = total_line_count - file_line_count

        return False, '{} issues'.format(issue_count)
Example #6
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        do_coverage = True
        base_path = cast(str, env.destination_directory)
        rc_path = os.path.join(base_path,
                               'continuous-integration',
                               '.coveragerc')
        target_path = base_path
        result = shell_tools.run_cmd(
            env.bin('pytest'),
            target_path,
            None if verbose else '--quiet',
            *([
                  '--cov',
                  '--cov-report=annotate',
                  '--cov-config={}'.format(rc_path)
              ] if do_coverage else []),
            out=shell_tools.TeeCapture(sys.stdout),
            raise_on_fail=False,
            log_run_to_stderr=verbose)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Tests passed!'

        last_line = [e for e in output.split('\n') if e.strip()][-1]
        fail_match = re.match(r'.+=== (\d+) failed', last_line)
        if fail_match is None:
            return False, 'Tests failed.'
        return False, '{} tests failed.'.format(fail_match.group(1))
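
The *([...] if do_coverage else []) idiom deserves a note: unpacking an empty
list contributes no positional arguments, so the coverage flags only reach
run_cmd when do_coverage is set. The None argument works similarly, assuming
run_cmd drops None values (the None if verbose else '--quiet' pattern only
makes sense if it does). A short demonstration:

def collect_args(*args):
    # Assumes, as run_cmd apparently does, that None means "no flag".
    return [a for a in args if a is not None]

for do_coverage in (False, True):
    print(collect_args(
        'pytest_target',
        None,  # stands in for the suppressed '--quiet'
        *(['--cov', '--cov-report=annotate'] if do_coverage else [])))
# -> ['pytest_target']
# -> ['pytest_target', '--cov', '--cov-report=annotate']
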
Example #7
    def pick_env_and_run_and_report(self, env: env_tools.PreparedEnv,
                                    verbose: bool,
                                    previous_failures: Set['Check']
                                   ) -> CheckResult:
        """Evaluates this check in python 3 or 2.7, and reports to github.

        If the prepared environment is not linked to a github repository
        with a known access token, reporting to github is skipped.

        Args:
            env: A prepared python 3 environment.
            verbose: When set, more progress output is produced.
            previous_failures: Checks that have already run and failed.

        Returns:
            A CheckResult instance.
        """
        env.report_status_to_github('pending', 'Running...', self.context())
        chosen_env = cast(env_tools.PreparedEnv, env)
        os.chdir(cast(str, chosen_env.destination_directory))

        result = self.run(chosen_env, verbose, previous_failures)

        if result.unexpected_error is not None:
            env.report_status_to_github('error',
                                        'Unexpected error.',
                                        self.context())
        else:
            env.report_status_to_github(
                'success' if result.success else 'failure',
                result.message,
                self.context())

        return result
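
The reporting branch reads exactly three attributes off the result, so a
minimal CheckResult needs only success, message, and unexpected_error. The
real class in this codebase may well carry more; this stand-in is an
assumption:

from dataclasses import dataclass
from typing import Optional

@dataclass
class CheckResult:
    success: bool
    message: str
    unexpected_error: Optional[BaseException] = None

result = CheckResult(success=False, message='2 tests failed.')
state = ('error' if result.unexpected_error is not None
         else 'success' if result.success else 'failure')
print(state, result.message)  # -> failure 2 tests failed.
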
Example #8
def check_for_uncovered_lines(env: env_tools.PreparedEnv) -> int:
    # Build context from environment.
    changed_files = env.get_changed_files()

    # Find/print lines that were changed but aren't covered.
    uncovered_count = 0
    for changed_file in changed_files:
        if not is_applicable_python_file(changed_file):
            continue

        base_path = cast(str, env.destination_directory)
        uncovered_lines = get_incremental_uncovered_lines(
            os.path.join(base_path, changed_file), env.compare_commit_id, env.actual_commit_id
        )

        if uncovered_lines:
            uncovered_count += len(uncovered_lines)
            print(
                shell_tools.highlight(
                    '************* {} ({} uncovered)'.format(changed_file, len(uncovered_lines)),
                    color_code=shell_tools.RED,
                )
            )
        for index, line, reason in uncovered_lines:
            print(
                'Line {} {} but not covered: {}'.format(
                    shell_tools.highlight(str(index).rjust(4), color_code=shell_tools.BOLD),
                    reason,
                    shell_tools.highlight(line, color_code=shell_tools.YELLOW),
                )
            )

    # Inform of aggregate result.
    print()
    if uncovered_count:
        print(
            shell_tools.highlight(
                'Found {} uncovered touched lines.'.format(uncovered_count),
                color_code=shell_tools.RED,
            )
        )
    else:
        print(shell_tools.highlight('All touched lines covered', color_code=shell_tools.GREEN))
    print()
    return uncovered_count
Example #9
def check_for_uncovered_lines(env: env_tools.PreparedEnv) -> int:
    # Build context from environment.
    changed_files = env.get_changed_files()

    # Find/print lines that were changed but aren't covered.
    uncovered_count = 0
    for changed_file in changed_files:
        if not is_applicable_python_file(changed_file):
            continue

        base_path = cast(str, env.destination_directory)
        uncovered_lines = get_incremental_uncovered_lines(
            os.path.join(base_path, changed_file),
            env.compare_commit_id,
            env.actual_commit_id)

        if uncovered_lines:
            uncovered_count += len(uncovered_lines)
            print(shell_tools.highlight(
                '************* {} ({} uncovered)'.format(
                    changed_file,
                    len(uncovered_lines)),
                color_code=shell_tools.RED))
        for index, line, reason in uncovered_lines:
            print('Line {} {} but not covered: {}'.format(
                shell_tools.highlight(str(index).rjust(4),
                                      color_code=shell_tools.BOLD),
                reason,
                shell_tools.highlight(line,
                                      color_code=shell_tools.YELLOW)))

    # Inform of aggregate result.
    print()
    if uncovered_count:
        print(shell_tools.highlight(
            'Found {} uncovered touched lines.'.format(uncovered_count),
            color_code=shell_tools.RED))
    else:
        print(shell_tools.highlight('All touched lines covered',
                                    color_code=shell_tools.GREEN))
    print()
    return uncovered_count
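
The inner loop unpacks (index, line, reason) triples, so
get_incremental_uncovered_lines presumably returns, per file, a list of
(line number, source text, reason-the-line-counts-as-touched) tuples.
Fabricated data of that shape, exercising the print format without the
color highlighting:

uncovered_lines = [
    (12, 'return compute(x)', 'added'),
    (13, 'raise ValueError(x)', 'changed'),
]
for index, line, reason in uncovered_lines:
    print('Line {} {} but not covered: {}'.format(
        str(index).rjust(4), reason, line))
# -> Line   12 added but not covered: return compute(x)
# -> Line   13 changed but not covered: raise ValueError(x)
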
Example #10
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        base_path = cast(str, env.destination_directory)
        config_path = os.path.join(base_path, 'dev_tools', 'conf', 'mypy.ini')
        files = list(
            env_tools.get_unhidden_ungenerated_python_files(base_path))

        result = shell_tools.run_cmd(env.bin('mypy'),
                                     '--config-file={}'.format(config_path),
                                     *files,
                                     out=shell_tools.TeeCapture(sys.stdout),
                                     raise_on_fail=False,
                                     log_run_to_stderr=verbose,
                                     abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Types look good!'
        issue_count = len([e for e in output.split('\n') if e.strip()])

        return False, '{} issues'.format(issue_count)
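
Here the issue count is simply the number of non-blank output lines. Note
that newer mypy versions append a summary line like 'Found 2 errors in 1
file', which this count would include; the sample below is illustrative:

def count_mypy_issues(output: str) -> int:
    # Counts every non-blank line of mypy output as one issue.
    return len([e for e in output.split('\n') if e.strip()])

sample = ('foo.py:10: error: Missing return statement\n'
          'foo.py:12: error: Unsupported operand types for + '
          '("int" and "str")\n')
print(count_mypy_issues(sample))  # -> 2
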
Example #11
    def perform_check(self, env: env_tools.PreparedEnv, verbose: bool):
        base_path = cast(str, env.destination_directory)
        config_path = os.path.join(base_path,
                                   'continuous-integration',
                                   'mypy.ini')
        files = list(env_tools.get_unhidden_ungenerated_python_files(
            base_path))

        result = shell_tools.run_cmd(
            env.bin('mypy'),
            '--config-file={}'.format(config_path),
            *files,
            out=shell_tools.TeeCapture(sys.stdout),
            raise_on_fail=False,
            log_run_to_stderr=verbose,
            abbreviate_non_option_arguments=True)

        output = cast(str, result[0])
        passed = result[2] == 0
        if passed:
            return True, 'Types look good!'
        issue_count = len([e for e in output.split('\n') if e.strip()])

        return False, '{} issues'.format(issue_count)
Example #12
    def pick_env_and_run_and_report(self,
                                    env: env_tools.PreparedEnv,
                                    env_py2: Optional[env_tools.PreparedEnv],
                                    verbose: bool,
                                    previous_failures: Set['Check']
                                    ) -> CheckResult:
        """Evaluates this check in python 3 or 2.7, and reports to github.

        If the prepared environments are not linked to a github repository
        with a known access token, reporting to github is skipped.

        Args:
            env: A prepared python 3 environment.
            env_py2: A prepared python 2.7 environment.
            verbose: When set, more progress output is produced.
            previous_failures: Checks that have already run and failed.

        Returns:
            A CheckResult instance.
        """
        env.report_status_to_github('pending', 'Running...', self.context())
        chosen_env = cast(env_tools.PreparedEnv,
                          env_py2 if self.needs_python2_env() else env)
        os.chdir(cast(str, chosen_env.destination_directory))

        result = self.run(chosen_env, verbose, previous_failures)

        if result.unexpected_error is not None:
            env.report_status_to_github('error',
                                        'Unexpected error.',
                                        self.context())
        else:
            env.report_status_to_github(
                'success' if result.success else 'failure',
                result.message,
                self.context())

        return result
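
To make the two-phase reporting concrete: a 'pending' status is posted
before the check runs and is later overwritten by 'success', 'failure', or
'error'. A sketch with a fake environment that records the calls (this fake
models only the slice of PreparedEnv these methods use, not its real API):

class FakeEnv:
    def __init__(self):
        self.statuses = []

    def report_status_to_github(self, state, description, context):
        self.statuses.append((state, description, context))

env = FakeEnv()
env.report_status_to_github('pending', 'Running...', 'pytest')
success = True  # pretend the check ran and passed
env.report_status_to_github('success' if success else 'failure',
                            'Tests passed!', 'pytest')
print(env.statuses)
# -> [('pending', 'Running...', 'pytest'),
#     ('success', 'Tests passed!', 'pytest')]
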