Example 1
def main():
    quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q'

    file_names = glob.glob('cirq/**/*.py', recursive=True)
    # Remove the engine client code.
    file_names = [
        f for f in file_names if not f.startswith('cirq/google/engine/client/')
    ]
    failed, attempted = run_tests(file_names,
                                  include_modules=True,
                                  include_local=False,
                                  quiet=quiet)

    if failed != 0:
        print(
            shell_tools.highlight(
                'Failed: {} failed, {} passed, {} total'.format(
                    failed, attempted - failed, attempted),
                shell_tools.RED,
            ))
        sys.exit(1)
    else:
        print(
            shell_tools.highlight('Passed: {}'.format(attempted),
                                  shell_tools.GREEN))
        sys.exit(0)
Example 2
def main():
    verbose = True
    checks = [
        all_checks.pylint,
        all_checks.typecheck,
        all_checks.pytest,
        all_checks.incremental_coverage,
    ]

    env = prepared_env.PreparedEnv(None, 'HEAD', 'master', os.getcwd(), None)
    results = []
    for c in checks:
        print()
        print(
            shell_tools.highlight('Running ' + c.command_line_switch(),
                                  shell_tools.GREEN))
        result = c.context(), c.perform_check(env, verbose=verbose)
        print(
            shell_tools.highlight(
                'Finished ' + c.command_line_switch(),
                shell_tools.GREEN if result[1][0] else shell_tools.RED))
        if verbose:
            print(result)
        print()
        results.append(result)

    print()
    print("ALL CHECK RESULTS")
    for result in results:
        print(result)

    if any(not e[1][0] for e in results):
        sys.exit(1)
Example 3
def main():
    if len(sys.argv) < 2:
        print(
            shell_tools.highlight(
                'Must specify a comparison branch '
                '(e.g. "origin/master" or "HEAD~1").', shell_tools.RED))
        sys.exit(1)
    comparison_branch = sys.argv[1]

    env = prepared_env.PreparedEnv(
        github_repo=None,
        actual_commit_id=None,  # local uncommitted files
        compare_commit_id=comparison_branch,
        destination_directory=os.getcwd(),
        virtual_env_path=None)

    pytest = check_pytest_with_coverage.TestAndPrepareCoverageCheck()
    incremental_coverage = check_incremental_coverage.IncrementalCoverageCheck(
        pytest)

    check_results = [
        pytest.run(env, False, set()),
        incremental_coverage.run(env, False, set()),
    ]
    if any(not e.success for e in check_results):
        print(shell_tools.highlight('Failed.', shell_tools.RED))
        sys.exit(1)
Example 4
def __str__(self):
    outcome = ('ERROR' if self.unexpected_error else
               'pass' if self.success else 'FAIL')
    msg = self.unexpected_error if self.unexpected_error else self.message
    result = '{}: {} ({})'.format(outcome, self.check.context(), msg)
    return shell_tools.highlight(
        result, shell_tools.GREEN if self.success else shell_tools.RED)
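The CheckResult container that this __str__ reads from is not shown in these examples. A hypothetical minimal sketch, inferring the field names from the attribute accesses here and from the constructor calls in Examples 9 and 10 (check, success, message, unexpected_error), might look like the following; the real class may carry more state.

# Hypothetical sketch of the result container __str__ reads from; field
# names are inferred from the examples and may not match the real class.
class CheckResult:
    """Outcome of running a single check."""

    def __init__(self, check, success, message, unexpected_error):
        self.check = check                        # the Check that was run
        self.success = success                    # True if the check passed
        self.message = message                    # human-readable summary
        self.unexpected_error = unexpected_error  # exception raised, or None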
Example 5
def check_for_uncovered_lines(env: env_tools.PreparedEnv) -> int:
    # Build context from environment.
    changed_files = env.get_changed_files()

    # Find/print lines that were changed but aren't covered.
    uncovered_count = 0
    for changed_file in changed_files:
        if not is_applicable_python_file(changed_file):
            continue

        base_path = cast(str, env.destination_directory)
        uncovered_lines = get_incremental_uncovered_lines(
            os.path.join(base_path, changed_file), env.compare_commit_id, env.actual_commit_id
        )

        if uncovered_lines:
            uncovered_count += len(uncovered_lines)
            print(
                shell_tools.highlight(
                    '************* {} ({} uncovered)'.format(changed_file, len(uncovered_lines)),
                    color_code=shell_tools.RED,
                )
            )
        for index, line, reason in uncovered_lines:
            print(
                'Line {} {} but not covered: {}'.format(
                    shell_tools.highlight(str(index).rjust(4), color_code=shell_tools.BOLD),
                    reason,
                    shell_tools.highlight(line, color_code=shell_tools.YELLOW),
                )
            )

    # Inform of aggregate result.
    print()
    if uncovered_count:
        print(
            shell_tools.highlight(
                'Found {} uncovered touched lines.'.format(uncovered_count),
                color_code=shell_tools.RED,
            )
        )
    else:
        print(shell_tools.highlight('All touched lines covered', color_code=shell_tools.GREEN))
    print()
    return uncovered_count
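This example filters changed files through is_applicable_python_file before looking for uncovered lines. That predicate is not included in these examples; a minimal sketch, under the assumption that "applicable" simply means a Python source file that is not itself a test, could be:

# Hypothetical sketch of the filter used above; the real predicate may apply
# further exclusions (generated files, test data, etc.).
def is_applicable_python_file(rel_path: str) -> bool:
    """Returns True for Python source files whose coverage should be checked."""
    return rel_path.endswith('.py') and not rel_path.endswith('_test.py')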
Example 6
def __str__(self):
    outcome = ('ERROR' if self.unexpected_error
               else 'pass' if self.success
               else 'FAIL')
    msg = self.unexpected_error if self.unexpected_error else self.message
    result = '{}: {} ({})'.format(outcome, self.check.context(), msg)
    return shell_tools.highlight(
        result,
        shell_tools.GREEN if self.success else shell_tools.RED)
Example 7
def main():
    quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q'

    file_names = glob.glob('openfermion-cirq/**/*.py', recursive=True)
    failed, attempted = run_tests(file_names,
                                  include_modules=True,
                                  include_local=False,
                                  quiet=quiet)

    if failed != 0:
        print(
            shell_tools.highlight(
                f'Failed: {failed} failed, '
                f'{attempted - failed} passed, {attempted} total',
                shell_tools.RED))
        sys.exit(1)
    else:
        print(shell_tools.highlight(f'Passed: {attempted}', shell_tools.GREEN))
        sys.exit(0)
Example 8
def check_for_uncovered_lines(env: env_tools.PreparedEnv) -> int:
    # Build context from environment.
    changed_files = env.get_changed_files()

    # Find/print lines that were changed but aren't covered.
    uncovered_count = 0
    for changed_file in changed_files:
        if not is_applicable_python_file(changed_file):
            continue

        base_path = cast(str, env.destination_directory)
        uncovered_lines = get_incremental_uncovered_lines(
            os.path.join(base_path, changed_file),
            env.compare_commit_id,
            env.actual_commit_id)

        if uncovered_lines:
            uncovered_count += len(uncovered_lines)
            print(shell_tools.highlight(
                '************* {} ({} uncovered)'.format(
                    changed_file,
                    len(uncovered_lines)),
                color_code=shell_tools.RED))
        for index, line, reason in uncovered_lines:
            print('Line {} {} but not covered: {}'.format(
                shell_tools.highlight(str(index).rjust(4),
                                      color_code=shell_tools.BOLD),
                reason,
                shell_tools.highlight(line,
                                      color_code=shell_tools.YELLOW)))

    # Inform of aggregate result.
    print()
    if uncovered_count:
        print(shell_tools.highlight(
            'Found {} uncovered touched lines.'.format(uncovered_count),
            color_code=shell_tools.RED))
    else:
        print(shell_tools.highlight('All touched lines covered',
                                    color_code=shell_tools.GREEN))
    print()
    return uncovered_count
Example 9
    def run(self,
            env: env_tools.PreparedEnv,
            verbose: bool,
            previous_failures: Set['Check']) -> CheckResult:
        """Evaluates this check.

        Args:
            env: The prepared python environment to run the check in.
            verbose: When set, more progress output is produced.
            previous_failures: Checks that have already run and failed.

        Returns:
            A CheckResult instance.
        """

        # Skip if a dependency failed.
        if previous_failures.intersection(self.dependencies):
            print(shell_tools.highlight(
                'Skipped ' + self.command_line_switch(),
                shell_tools.YELLOW))
            return CheckResult(
                self, False, 'Skipped due to dependency failing.', None)

        print(shell_tools.highlight(
            'Running ' + self.command_line_switch(),
            shell_tools.GREEN))
        try:
            success, message = self.perform_check(env, verbose=verbose)
            result = CheckResult(self, success, message, None)
        except Exception as ex:
            result = CheckResult(self, False, 'Unexpected error.', ex)

        print(shell_tools.highlight(
            'Finished ' + self.command_line_switch(),
            shell_tools.GREEN if result.success else shell_tools.RED))
        if verbose:
            print(result)

        return result
Example 10
    def run(self,
            env: env_tools.PreparedEnv,
            verbose: bool,
            previous_failures: Set['Check']) -> CheckResult:
        """Evaluates this check.

        Args:
            env: The prepared python environment to run the check in.
            verbose: When set, more progress output is produced.
            previous_failures: Checks that have already run and failed.

        Returns:
            A CheckResult instance.
        """

        # Skip if a dependency failed.
        if previous_failures.intersection(self.dependencies):
            print(shell_tools.highlight(
                'Skipped ' + self.command_line_switch(),
                shell_tools.YELLOW))
            return CheckResult(
                self, False, 'Skipped due to dependency failing.', None)

        print(shell_tools.highlight(
            'Running ' + self.command_line_switch(),
            shell_tools.GREEN))
        try:
            success, message = self.perform_check(env, verbose=verbose)
            result = CheckResult(self, success, message, None)
        except Exception as ex:
            result = CheckResult(self, False, 'Unexpected error.', ex)

        print(shell_tools.highlight(
            'Finished ' + self.command_line_switch(),
            shell_tools.GREEN if result.success else shell_tools.RED))
        if verbose:
            print(result)

        return result
Example 11
def main():
    if len(sys.argv) < 2:
        print(shell_tools.highlight(
            'Must specify a comparison branch (e.g. "master").',
            shell_tools.RED))
        sys.exit(1)
    comparison_branch = sys.argv[1]

    env = prepared_env.PreparedEnv(
        github_repo=None,
        actual_commit_id=None,  # local uncommitted files
        compare_commit_id=comparison_branch,
        destination_directory=os.getcwd(),
        virtual_env_path=None)
    check_results = [
        all_checks.pytest.run(env, False, set()),
        all_checks.incremental_coverage.run(env, False, set()),
    ]
    if any(not e.success for e in check_results):
        print(shell_tools.highlight(
            'Failed.',
            shell_tools.RED))
        sys.exit(1)
Example 12
def main():
    if len(sys.argv) < 2:
        print(
            shell_tools.highlight(
                'Must specify a comparison branch '
                '(e.g. "origin/master" or "HEAD~1").', shell_tools.RED))
        sys.exit(1)
    comparison_branch = sys.argv[1]

    env = prepared_env.PreparedEnv(
        github_repo=None,
        actual_commit_id=None,  # local uncommitted files
        compare_commit_id=comparison_branch,
        destination_directory=os.getcwd(),
        virtual_env_path=None)

    uncovered_count = check_for_uncovered_lines(env)
    if uncovered_count:
        sys.exit(1)
Example 13
def exec_tests(tests: Iterable[Doctest],
               quiet: bool = True) -> Tuple[doctest.TestResults, List[str]]:
    """Runs a list of `Doctest`s and collects and returns any error messages.

    Args:
        tests: The tests to run.
        quiet: When set, progress output is suppressed.

    Returns:
        A tuple containing the results (# failures, # attempts) and a list
        of the error outputs from each failing test.
    """
    if not quiet:
        try_print = print
    else:
        try_print = lambda *args, **kwargs: None
    try_print('Executing tests ', end='')

    failed, attempted = 0, 0
    error_messages = []
    for test in tests:
        out = OutputCapture()
        with out:
            r = test.run()
        failed += r.failed
        attempted += r.attempted
        if r.failed != 0:
            try_print('F', end='', flush=True)
            error = shell_tools.highlight(
                '{}\n{} failed, {} passed, {} total\n'.format(
                    test.file_name, r.failed, r.attempted - r.failed,
                    r.attempted),
                shell_tools.RED,
            )
            error += out.content()
            error_messages.append(error)
        else:
            try_print('.', end='', flush=True)

    try_print()

    return doctest.TestResults(failed=failed,
                               attempted=attempted), error_messages
Example 14
def main():
    pull_request_number, access_token, verbose, checks = parse_args()
    if pull_request_number is None:
        print(
            shell_tools.highlight(
                'No pull request number given. Using local files.',
                shell_tools.YELLOW))
        print()

    test_dir = tempfile.mkdtemp(prefix='test-{}-'.format(REPO_NAME))
    test_dir_2 = tempfile.mkdtemp(prefix='test-{}-py2-'.format(REPO_NAME))
    currently_pending = set()
    env = None
    try:
        env = env_tools.prepare_temporary_test_environment(
            destination_directory=test_dir,
            repository=env_tools.GithubRepository(
                organization=REPO_ORGANIZATION,
                name=REPO_NAME,
                access_token=access_token),
            pull_request_number=pull_request_number,
            commit_ids_known_callback=lambda e: report_pending(
                e, checks, currently_pending),
            verbose=verbose)

        env2 = None

        check_results = []
        failures = set()
        for c in checks:
            # Prepare environment if needed.
            if c.needs_python2_env() and env2 is None:
                env2 = env_tools.derive_temporary_python2_environment(
                    destination_directory=test_dir_2,
                    python3_environment=env,
                    verbose=verbose)

            # Run the check.
            print()
            result = c.pick_env_and_run_and_report(env, env2, verbose,
                                                   failures)

            # Record results.
            check_results.append(result)
            currently_pending.remove(c)
            if not result.success:
                failures.add(c)
            print()

    finally:
        shutil.rmtree(test_dir, ignore_errors=True)
        shutil.rmtree(test_dir_2, ignore_errors=True)
        for c in currently_pending:
            if env is not None:
                env.report_status_to_github('error', 'Unexpected error.',
                                            c.context())

    print()
    print("ALL CHECK RESULTS")
    for result in check_results:
        print(result)

    for result in check_results:
        if result.unexpected_error is not None:
            raise EnvironmentError('At least one check raised.') from (
                result.unexpected_error)

    if any(not e.success for e in check_results):
        sys.exit(1)
Example 15
def main():
    pull_request_number, access_token, verbose, checks = parse_args()
    if pull_request_number is None:
        print(shell_tools.highlight(
            'No pull request number given. Using local files.',
            shell_tools.YELLOW))
        print()

    test_dir = tempfile.mkdtemp(prefix='test-{}-'.format(REPO_NAME))
    test_dir_2 = tempfile.mkdtemp(prefix='test-{}-py2-'.format(REPO_NAME))
    currently_pending = set()
    env = None
    try:
        env = env_tools.prepare_temporary_test_environment(
            destination_directory=test_dir,
            repository=env_tools.GithubRepository(
                organization=REPO_ORGANIZATION,
                name=REPO_NAME,
                access_token=access_token),
            pull_request_number=pull_request_number,
            commit_ids_known_callback=lambda e:
                report_pending(e, checks, currently_pending),
            verbose=verbose)

        env2 = None

        check_results = []
        failures = set()
        for c in checks:
            # Prepare environment if needed.
            if c.needs_python2_env() and env2 is None:
                env2 = env_tools.derive_temporary_python2_environment(
                    destination_directory=test_dir_2,
                    python3_environment=env,
                    verbose=verbose)

            # Run the check.
            print()
            result = c.pick_env_and_run_and_report(env, env2, verbose, failures)

            # Record results.
            check_results.append(result)
            currently_pending.remove(c)
            if not result.success:
                failures.add(c)
            print()

    finally:
        shutil.rmtree(test_dir, ignore_errors=True)
        shutil.rmtree(test_dir_2, ignore_errors=True)
        for c in currently_pending:
            if env is not None:
                env.report_status_to_github('error',
                                            'Unexpected error.',
                                            c.context())

    print()
    print("ALL CHECK RESULTS")
    for result in check_results:
        print(result)

    for result in check_results:
        if result.unexpected_error is not None:
            raise EnvironmentError('At least one check raised.') from (
                result.unexpected_error)

    if any(not e.success for e in check_results):
        sys.exit(1)
Example 16
def __str__(self):
    outcome = 'ERROR' if self.unexpected_error else 'pass' if self.success else 'FAIL'
    msg = self.unexpected_error if self.unexpected_error else self.message
    result = f'{outcome}: {self.check.context()} ({msg})'
    return shell_tools.highlight(
        result, shell_tools.GREEN if self.success else shell_tools.RED)
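Every example above routes its status messages through shell_tools.highlight(text, color_code), with color constants such as RED, GREEN, YELLOW, and BOLD. The module itself is not reproduced here; a plausible minimal sketch, assuming the constants are ANSI SGR codes, is:

# Hypothetical sketch of shell_tools.highlight; the constant values below are
# standard ANSI SGR codes and are an assumption, not the module's actual code.
RED = 31
GREEN = 32
YELLOW = 33
BOLD = 1


def highlight(text: str, color_code: int) -> str:
    """Wraps text in ANSI escape sequences so terminals render it in color."""
    return '\033[{}m{}\033[0m'.format(color_code, text)


# Usage mirroring the examples above:
# print(highlight('Passed: 12', GREEN))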