Example #1
def pytest_unconfigure(config):

    if HAS_COVERAGE:

        # We create an empty coverage data object
        combined_cdata = CoverageData()

        # Add all files from astropy_helpers to make sure we compute the total
        # coverage, not just the coverage of the files that have non-zero
        # coverage.

        lines = {}
        for filename in glob.glob(os.path.join('astropy_helpers', '**', '*.py'), recursive=True):
            lines[os.path.abspath(filename)] = []

        for cdata in SUBPROCESS_COVERAGE:
            # For each CoverageData object, we go through all the files and
            # change the filename from one which might be a temporary path
            # to the local filename. We then only keep files that actually
            # exist.
            for filename in cdata.measured_files():
                try:
                    pos = filename.rindex('astropy_helpers')
                except ValueError:
                    continue
                short_filename = filename[pos:]
                if os.path.exists(short_filename):
                    lines[os.path.abspath(short_filename)].extend(cdata.lines(filename))

        combined_cdata.add_lines(lines)

        combined_cdata.write_file('.coverage.subprocess')
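
The '.coverage.subprocess' file written above is intended to be merged into the main coverage results afterwards. A minimal sketch of that merge step, assuming the same coverage 4.x file-based API used in the example (the 'coverage combine' command does the equivalent from the command line):

from coverage import CoverageData

# Read the main-process data and the subprocess data written above,
# merge them, and write the combined result back out.
main_data = CoverageData()
main_data.read_file('.coverage')

sub_data = CoverageData()
sub_data.read_file('.coverage.subprocess')

main_data.update(sub_data)  # merge measured lines from the subprocesses
main_data.write_file('.coverage')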
Example #2
    def __init__(
        self, test_runner, search_strategy, test, settings, random, had_seed
    ):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.at_least_one_success = False
        self.last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__had_seed = had_seed

        self.test = test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()
        self.failed_normally = False

        self.used_examples_from_database = False

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                parent = Collector._collectors[-1]

                # We include any files the collector has already decided to
                # trace whether or not on re-investigation we still think it
                # wants to trace them. The reason for this is that in some
                # cases coverage gets the wrong answer when we run it
                # ourselves due to reasons that are our fault but are hard to
                # fix (we lie about where certain functions come from).
                # This causes us to not record the actual test bodies as
                # covered. But if we intended to trace test bodies then the
                # file must already have been traced when getting to this point
                # and so will already be in the collector's data. Hence we can
                # use that information to get the correct answer here.
                # See issue 997 for more context.
                self.files_to_propagate = set(parent.data)
                self.hijack_collector(parent)

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None
Example #3
    def coverage_data(self, debug=None):
        """Return coverage.py coverage data as `coverage.CoverageData`."""
        coverage_str = self.resources.get(COVERAGE_KEY, None)
        if not coverage_str:
            return None
        from coverage import CoverageData

        coverage_data = CoverageData(debug=debug)
        coverage_data.read_fileobj(StringIO(coverage_str))
        return coverage_data
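
The inverse operation, serializing a CoverageData object back into a string for storage under COVERAGE_KEY, might look like the following sketch. It assumes the same coverage 4.x API; write_fileobj() is the counterpart of the read_fileobj() call used above, and serialize_coverage_data is a hypothetical helper name:

from io import StringIO

def serialize_coverage_data(coverage_data):
    # Dump a CoverageData object to a string so it can be stored in a
    # resources mapping and later read back with read_fileobj().
    out = StringIO()
    coverage_data.write_fileobj(out)
    return out.getvalue()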
Example #4
def compute_coverage(branch):
    coverage_data = CoverageData()
    try:
        # read_file() takes a file path, not an open file object.
        coverage_data.read_file(str(project_path.join('.coverage')))
    except Exception:
        print("No coverage data found", file=sys.stderr)

    git_proc = subprocess.Popen(['git', 'diff', '-U0', branch],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    # stdout from Popen is bytes; decode it before splitting on the
    # textual "diff --git" marker.
    git_output = git_proc.stdout.read().decode('utf-8')
    files = git_output.split("diff --git")

    from collections import defaultdict
    file_data = defaultdict(list)

    for the_file in files:
        filenames = re.findall('a/(.*?) b/(.*)', the_file)
        if not filenames:
            continue
        filename = project_path.join(filenames[0][1])
        if '.py' != filename.ext:
            continue
        the_file += "git_output_checker"
        the_diffs = re.findall(
            r'(@@.*?@@.*?(?=@@|git_output_checker))',
            the_file,
            re.M | re.S,
        )
        for diff in the_diffs:
            diff_args = re.match(r'@@ -(\d+)(,(\d+))*\s+\+(\d+)(,(\d+))*',
                                 diff).groups()
            if diff_args[5]:
                for extra_line in range(int(diff_args[5])):
                    file_data[filename].append(extra_line + int(diff_args[3]))
            else:
                file_data[filename].append(int(diff_args[3]))

    line_count = 0
    completed_lines = 0
    for file_changed, lines in file_data.items():
        for line in lines:
            line_count += 1
            # lines() expects a plain string path, not a py.path object.
            used_lines = coverage_data.lines(str(file_changed))
            if not used_lines:
                continue
            if isinstance(used_lines, int):
                used_lines = set([used_lines])
            else:
                used_lines = set(used_lines)
            if line in used_lines:
                completed_lines += 1

    if line_count == 0:
        return 100.0  # no changed Python lines to measure
    return float(completed_lines) / line_count * 100
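
Hypothetical usage, assuming project_path points at a repository checkout with a fresh '.coverage' file:

pct = compute_coverage('origin/master')
print('%.1f%% of the changed lines are covered by tests' % pct)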
Example #5
def get_existing_tests(coverage_file_path: str) -> Set[str]:
    """Read all the test function names from the coverage file.
    pytest-cov creates the coverage file and adds a section at the
    end of each testname which need to be stripped.
    """
    coverage_data = CoverageData(coverage_file_path)
    coverage_data.read()
    return {
        strip_pytest_cov_testname(testname)
        for testname in coverage_data.measured_contexts()
    }
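
strip_pytest_cov_testname() is not shown in this example. One plausible implementation, assuming pytest-cov's usual 'testname|phase' dynamic-context format (e.g. 'tests/test_foo.py::test_bar|run'):

def strip_pytest_cov_testname(testname: str) -> str:
    # Drop the trailing |setup / |run / |teardown phase marker that
    # pytest-cov appends to dynamic context names.
    name, sep, _phase = testname.rpartition('|')
    return name if sep else testname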
Example #6
    def evaluate_test_data(self, data):
        try:
            if self.collector is None:
                result = self.execute(data)
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.execute(data, collect=True)
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename))
            if result is not None and self.settings.perform_health_check:
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
Example #7
    def test_note(self):
        self.make_file(".coveragerc", """\
            [run]
            data_file = mydata.dat
            note = These are musical notes: ♫𝅗𝅥♩
            """)
        self.make_file("simple.py", """print('hello')""")
        self.run_command("coverage run simple.py")

        data = CoverageData()
        data.read_file("mydata.dat")
        infos = data.run_infos()
        self.assertEqual(len(infos), 1)
        self.assertEqual(infos[0]['note'], u"These are musical notes: ♫𝅗𝅥♩")
Example #8
def get_tests_from_changes(
    commithash_to_compare: str, coverage_file_path: str
) -> Set[str]:
    """Returns the test set from Git changes.
    The given commithash is compared to the current working copy
    to extract Git diffs, if the provided commithash exists in the repo.
    Otherwise only changes in the git working directory are considered.
    """
    repo = get_git_repo()
    if commit_exists(commithash_to_compare, repo):
        file_diffs = {
            file_path: get_file_diff_data_committed_and_workdir(
                repo, file_path, commithash_to_compare
            )
            for file_path in get_changed_files_committed_and_workdir(
                repo, commithash_to_compare
            )
        }
    else:
        file_diffs = {
            file_path: get_file_diff_data_workdir(repo, file_path)
            for file_path in get_changed_files_workdir(repo)
        }
    coverage_data = CoverageData(coverage_file_path)
    coverage_data.read()

    tests: Set[str] = set()
    for changed_file in file_diffs:

        contexts = coverage_data.contexts_by_lineno(changed_file)
        if not contexts:
            continue

        changed_lines_with_tests = intersect_with_surroundings(
            get_changed_lines(file_diffs[changed_file]),
            contexts.keys()
        )

        tests.update(
            strip_pytest_cov_testname(testname)
            for line in changed_lines_with_tests
            for testname in contexts[line]
        )

    return tests
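
Hypothetical usage, selecting only the tests affected by changes since the previous commit:

affected_tests = get_tests_from_changes('HEAD~1', '.coverage')
print('\n'.join(sorted(affected_tests)))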
Example #9
def save_coverage(tree, templates, output_dir, app_name, granularity):
    groups = Utils2.get_groupped_classes(tree)
    init_row = templates['init_row.pt']
    init_table = templates['init_table.pt']
    index_template = templates['index.pt']

    rows = []
    total_coverage_data = CoverageData()
    for g in groups:
        (package, path, coverage_data) = save_package_indexhtml(g, templates, output_dir, app_name, granularity)
        coverage = coverage_data.get_formatted_coverage(granularity)
        row = init_row(elementlink=path, type='package', elementname=package,
                       coverage=coverage,
                       respath='', coverage_data=coverage_data,
                       is_instruction_level=Granularity.is_instruction(granularity),
                       progress_covered=coverage_data.covered(granularity),
                       progress_missed=coverage_data.missed(granularity))
        rows.append(Markup(row))
        total_coverage_data.add_data(coverage_data)
    total_coverage = total_coverage_data.get_formatted_coverage(granularity)
    table = init_table(rows=Markup("\n".join(rows)),
                       total_coverage=total_coverage,
                       total_coverage_data=total_coverage_data,
                       is_instruction_level=Granularity.is_instruction(granularity),
                       progress_covered=total_coverage_data.covered(granularity),
                       progress_all=total_coverage_data.coverable(granularity))
    root_path = ''
    html = index_template(table=Markup(table), appname=app_name, title=app_name, package=None,
                          respath=root_path, file_name=None, granularity=Granularity.get(granularity))
    path = os.path.join(output_dir, 'index.html')
    with open(path, 'w') as f:
        f.write(html)
Example #10
    def __init__(
        self, test_runner, search_strategy, test, settings, random, had_seed
    ):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.at_least_one_success = False
        self.last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__had_seed = had_seed

        self.test = test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()

        self.used_examples_from_database = False

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                self.hijack_collector(Collector._collectors[-1])

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None
Example #11
def run_setup(setup_script, args):

    # This used to call setuptools.sandbox's run_setup, but due to issues with
    # this and Cython (which caused segmentation faults), we now use subprocess.

    setup_script = os.path.abspath(setup_script)

    path = os.path.dirname(setup_script)
    setup_script = os.path.basename(setup_script)

    if HAS_COVERAGE:

        # In this case, we run the command using the coverage command and we
        # then collect the coverage data into a SUBPROCESS_COVERAGE list which
        # is set up at the start of the testing process and is then combined
        # into a single .coverage file at the end of the testing process.

        p = sp.Popen(['coverage', 'run', setup_script] + list(args),
                     cwd=path,
                     stdout=sp.PIPE,
                     stderr=sp.PIPE)
        stdout, stderr = p.communicate()

        cdata = CoverageData()
        cdata.read_file(os.path.join(path, '.coverage'))
        SUBPROCESS_COVERAGE.append(cdata)

    else:

        # Otherwise we just run the tests with Python

        p = sp.Popen([sys.executable, setup_script] + list(args),
                     cwd=path,
                     stdout=sp.PIPE,
                     stderr=sp.PIPE)
        stdout, stderr = p.communicate()

    sys.stdout.write(stdout.decode('utf-8'))
    sys.stderr.write(stderr.decode('utf-8'))

    if p.returncode != 0:
        raise SystemExit(p.returncode)
Example #12
def print_coverage(coverage_data: CoverageData, coverage: Coverage):
    for f in coverage_data.measured_files():
        lines = coverage_data.lines(f)
        print()
        print()
        pprint(f)
        print('lines:')
        pprint(lines)
        print()
        print('arcs:')
        pprint(coverage_data.arcs(f))
        print()
        print()
        # analysis() returns (filename, statements, missing, missing_formatted).
        analysis = coverage.analysis(f)
        pprint(analysis)
        print()
        print()

    summary = coverage_data.line_counts()
    print('line_counts')
    pprint(summary)
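
A minimal way to drive this function, assuming a '.coverage' file already exists in the working directory (Coverage.load() and Coverage.get_data() are standard coverage.py API):

from coverage import Coverage

cov = Coverage()
cov.load()  # read the existing .coverage data file
print_coverage(cov.get_data(), cov)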
Example #13
def pytest_unconfigure(config):

    if HAS_COVERAGE:

        # We create an empty coverage data object
        combined_cdata = CoverageData()

        # Add all files from extension_helpers to make sure we compute the total
        # coverage, not just the coverage of the files that have non-zero
        # coverage.

        lines = {}
        for filename in glob.glob(os.path.join('extension_helpers', '**',
                                               '*.py'),
                                  recursive=True):
            lines[os.path.abspath(filename)] = []

        for cdata in SUBPROCESS_COVERAGE:
            # For each CoverageData object, we go through all the files and
            # change the filename from one which might be a temporary path
            # to the local filename. We then only keep files that actually
            # exist.
            for filename in cdata.measured_files():
                try:
                    pos = filename.rindex('extension_helpers')
                except ValueError:
                    continue
                short_filename = filename[pos:]
                if os.path.exists(short_filename):
                    lines[os.path.abspath(short_filename)].extend(
                        cdata.lines(filename))

        combined_cdata.add_lines(lines)

        combined_cdata.write_file('.coverage.subprocess')
Example #14
def get_coverage_data_from_file(path: str = '.coverage') -> 'CoverageData':
    """Get the coverage data from a file

    Coverage is generally stored in a '.covarage' file in
    sqllite format, which can be loaded for post processing.

    Args:
        path (str): path to the coverage file

    Raises:
        AssertionError: raised when coverage data object is empty

    Returns:
        CoverageData: previously recorded test coverage
    """
    from coverage import CoverageData
    cov_data = CoverageData(basename=path)
    cov_data.read()

    assert cov_data, 'no coverage data was loaded'

    return cov_data
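
Example usage of the helper above:

cov_data = get_coverage_data_from_file('.coverage')
for measured in sorted(cov_data.measured_files()):
    print(measured)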
Example #15
class StateForActualGivenExecution(object):

    def __init__(self, test_runner, search_strategy, test, settings, random):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.at_least_one_success = False
        self.last_exception = None
        self.repr_for_last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__in_final_replay = False

        if self.settings.deadline is None:
            self.test = test
        else:
            @proxies(test)
            def timed_test(*args, **kwargs):
                self.__test_runtime = None
                start = time.time()
                result = test(*args, **kwargs)
                runtime = (time.time() - start) * 1000
                self.__test_runtime = runtime
                if self.settings.deadline is not_set:
                    if (
                        not self.__warned_deadline and
                        runtime >= 200
                    ):
                        self.__warned_deadline = True
                        note_deprecation((
                            'Test took %.2fms to run. In future the default '
                            'deadline setting will be 200ms, which will '
                            'make this an error. You can set deadline to '
                            'an explicit value of e.g. %d to turn tests '
                            'slower than this into an error, or you can set '
                            'it to None to disable this check entirely.') % (
                                runtime, ceil(runtime / 100) * 100,
                        ))
                elif runtime >= self.current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
            self.test = timed_test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                self.hijack_collector(Collector._collectors[-1])

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None

    @property
    def current_deadline(self):
        base = self.settings.deadline
        if self.__in_final_replay:
            return base
        else:
            return base * 1.25

    def should_trace(self, original_filename, frame):  # pragma: no cover
        disp = FileDisposition()
        assert original_filename is not None
        disp.original_filename = original_filename
        disp.canonical_filename = encoded_filepath(
            canonical_filename(original_filename))
        disp.source_filename = disp.canonical_filename
        disp.reason = ''
        disp.file_tracer = None
        disp.has_dynamic_filename = False
        disp.trace = hypothesis_check_include(disp.canonical_filename)
        if not disp.trace:
            disp.reason = 'hypothesis internal reasons'
        elif self.__existing_collector is not None:
            check = self.__existing_collector.should_trace(
                original_filename, frame)
            if check.trace:
                self.files_to_propagate.add(check.canonical_filename)
        return disp

    def hijack_collector(self, collector):  # pragma: no cover
        self.__existing_collector = collector
        original_save_data = collector.save_data

        def save_data(covdata):
            original_save_data(covdata)
            if collector.branch:
                covdata.add_arcs({
                    filename: {
                        arc: None
                        for arc in self.coverage_data.arcs(filename)}
                    for filename in self.files_to_propagate
                })
            else:
                covdata.add_lines({
                    filename: {
                        line: None
                        for line in self.coverage_data.lines(filename)}
                    for filename in self.files_to_propagate
                })
            collector.save_data = original_save_data
        collector.save_data = save_data

    def evaluate_test_data(self, data):
        if (
            time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT
        ):
            fail_health_check(self.settings, (
                'Your test has been running for at least five minutes. This '
                'is probably not what you intended, so by default Hypothesis '
                'turns it into an error.'
            ), HealthCheck.hung_test)

        try:
            if self.collector is None:
                result = self.test_runner(data, reify_and_execute(
                    self.search_strategy, self.test,
                ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.test_runner(data, reify_and_execute(
                        self.search_strategy, self.test,
                        collector=self.collector
                    ))
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename)
                        )
            if result is not None and self.settings.perform_health_check:
                fail_health_check(self.settings, (
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (self.test.__name__, result), HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))

    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        database_key = str_to_bytes(fully_qualified_name(self.test))
        self.start_time = time.time()
        global in_given
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings, random=self.random,
            database_key=database_key,
        )

        if in_given or self.collector is None:
            runner.run()
        else:  # pragma: no cover
            in_given = True
            original_trace = sys.gettrace()
            try:
                sys.settrace(None)
                runner.run()
            finally:
                in_given = False
                sys.settrace(original_trace)
        note_engine_for_statistics(runner)
        run_time = time.time() - self.start_time
        timed_out = runner.exit_reason == ExitReason.timeout
        if runner.last_data is None:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer), reverse=True
            )
        else:
            if timed_out:
                note_deprecation((
                    'Your tests are hitting the settings timeout (%.2fs). '
                    'This functionality will go away in a future release '
                    'and you should not rely on it. Instead, try setting '
                    'max_examples to be some value lower than %d (the number '
                    'of examples your test successfully ran here). Or, if you '
                    'would prefer your tests to run to completion, regardless '
                    'of how long they take, you can set the timeout value to '
                    'hypothesis.unlimited.'
                ) % (
                    self.settings.timeout, runner.valid_examples),
                    self.settings)
            if runner.valid_examples < min(
                self.settings.min_satisfying_examples,
                self.settings.max_examples,
            ) and not (
                runner.exit_reason == ExitReason.finished and
                self.at_least_one_success
            ):
                if timed_out:
                    raise Timeout((
                        'Ran out of time before finding a satisfying '
                        'example for '
                        '%s. Only found %d examples in ' +
                        '%.2fs.'
                    ) % (
                        get_pretty_function_description(self.test),
                        runner.valid_examples, run_time
                    ))
                else:
                    raise Unsatisfiable((
                        'Unable to satisfy assumptions of hypothesis '
                        '%s. Only %d examples considered '
                        'satisfied assumptions'
                    ) % (
                        get_pretty_function_description(self.test),
                        runner.valid_examples,))

        if not self.falsifying_examples:
            return

        flaky = 0

        self.__in_final_replay = True

        for falsifying_example in self.falsifying_examples:
            self.__was_flaky = False
            raised_exception = False
            try:
                with self.settings:
                    self.test_runner(
                        ConjectureData.for_buffer(falsifying_example.buffer),
                        reify_and_execute(
                            self.search_strategy, self.test,
                            print_example=True, is_final=True
                        ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.'
                )
            except BaseException:
                if len(self.falsifying_examples) <= 1:
                    raise
                raised_exception = True
                report(traceback.format_exc())

            if not raised_exception:
                if (
                    isinstance(
                        falsifying_example.__expected_exception,
                        DeadlineExceeded
                    ) and self.__test_runtime is not None
                ):
                    report((
                        'Unreliable test timings! On an initial run, this '
                        'test took %.2fms, which exceeded the deadline of '
                        '%.2fms, but on a subsequent run it took %.2f ms, '
                        'which did not. If you expect this sort of '
                        'variability in your test timings, consider turning '
                        'deadlines off for this test by setting deadline=None.'
                    ) % (
                        falsifying_example.__expected_exception.runtime,
                        self.settings.deadline, self.__test_runtime
                    ))
                else:
                    report(
                        'Failed to reproduce exception. Expected: \n' +
                        falsifying_example.__expected_traceback,
                    )

                filter_message = (
                    'Unreliable test data: Failed to reproduce a failure '
                    'and then when it came to recreating the example in '
                    'order to print the test data with a flaky result '
                    'the example was filtered out (by e.g. a '
                    'call to filter in your strategy) when we didn\'t '
                    'expect it to be.'
                )

                try:
                    self.test_runner(
                        ConjectureData.for_buffer(falsifying_example.buffer),
                        reify_and_execute(
                            self.search_strategy,
                            test_is_flaky(
                                self.test, self.repr_for_last_exception),
                            print_example=True, is_final=True
                        ))
                except (UnsatisfiedAssumption, StopTest):
                    self.__flaky(filter_message)
                except Flaky as e:
                    if len(self.falsifying_examples) > 1:
                        self.__flaky(e.args[0])
                    else:
                        raise

            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky((
                'Hypothesis found %d distinct failures, but %d of them '
                'exhibited some sort of flaky behaviour.') % (
                    len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(
                'Hypothesis found %d distinct failures.' %
                len(self.falsifying_examples))

    def __flaky(self, message):
        if len(self.falsifying_examples) <= 1:
            raise Flaky(message)
        else:
            self.__was_flaky = True
            report('Flaky example! ' + message)
Example #16
'''
This plug-in attempts to mark JIT-compiled functions and methods as covered: compiled code never
runs under the Python tracer, so such code would otherwise be missed by coverage, and is instead
marked covered when coverage is run with this plug-in.

DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will get
marked as covered.
'''

from coverage import CoveragePlugin, CoverageData  # type: ignore[import]
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines
from time import time
from typing import Any

# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
# https://coverage.readthedocs.io/en/coverage-5.5/cmd.html#combining-data-files-coverage-combine
cov_data = CoverageData(basename=f'.coverage.jit.{time()}')


def is_not_builtin_class(obj: Any) -> bool:
    return isclass(obj) and type(obj).__module__ != 'builtins'


class JitPlugin(CoveragePlugin):  # type: ignore[misc, no-any-unimported]
    '''
    dynamic_context is an overridden function that gives us access to every frame run during the coverage process. We
    look for when the function being run is `should_drop`, as all functions that get passed into `should_drop` will be
    compiled and thus should be marked as covered.
    '''
    def dynamic_context(self, frame: Any) -> None:
        if frame.f_code.co_name == 'should_drop':
            obj = frame.f_locals['fn']
Example #17
class StateForActualGivenExecution(object):
    def __init__(self, test_runner, search_strategy, test, settings, random,
                 had_seed):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__had_seed = had_seed

        self.test = test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()
        self.failed_normally = False

        self.used_examples_from_database = False

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                parent = Collector._collectors[-1]

                # We include any files the collector has already decided to
                # trace whether or not on re-investigation we still think it
                # wants to trace them. The reason for this is that in some
                # cases coverage gets the wrong answer when we run it
                # ourselves due to reasons that are our fault but are hard to
                # fix (we lie about where certain functions come from).
                # This causes us to not record the actual test bodies as
                # covered. But if we intended to trace test bodies then the
                # file must already have been traced when getting to this point
                # and so will already be in the collector's data. Hence we can
                # use that information to get the correct answer here.
                # See issue 997 for more context.
                self.files_to_propagate = set(parent.data)
                self.hijack_collector(parent)

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None

    def execute(
        self,
        data,
        print_example=False,
        is_final=False,
        expected_failure=None,
        collect=False,
    ):
        text_repr = [None]
        if self.settings.deadline is None:
            test = self.test
        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                self.__test_runtime = None
                initial_draws = len(data.draw_times)
                start = benchmark_time()
                result = self.test(*args, **kwargs)
                finish = benchmark_time()
                internal_draw_time = sum(data.draw_times[initial_draws:])
                runtime = (finish - start - internal_draw_time) * 1000
                self.__test_runtime = runtime
                if self.settings.deadline is not_set:
                if not self.__warned_deadline and runtime >= 200:
                        self.__warned_deadline = True
                        note_deprecation(
                            ('Test took %.2fms to run. In future the default '
                             'deadline setting will be 200ms, which will '
                             'make this an error. You can set deadline to '
                             'an explicit value of e.g. %d to turn tests '
                             'slower than this into an error, or you can set '
                             'it to None to disable this check entirely.') % (
                                 runtime,
                                 ceil(runtime / 100) * 100,
                             ))
                else:
                    current_deadline = self.settings.deadline
                    if not is_final:
                        current_deadline *= 1.25
                    if runtime >= current_deadline:
                        raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

        def run(data):
            if not hasattr(data, 'can_reproduce_example_from_repr'):
                data.can_reproduce_example_from_repr = True
            with self.settings:
                with BuildContext(data, is_final=is_final):
                    with deterministic_PRNG():
                        args, kwargs = data.draw(self.search_strategy)
                    if expected_failure is not None:
                        text_repr[0] = arg_string(test, args, kwargs)

                    if print_example:
                        example = '%s(%s)' % (test.__name__,
                                              arg_string(test, args, kwargs))
                        try:
                            ast.parse(example)
                        except SyntaxError:
                            data.can_reproduce_example_from_repr = False
                        report('Falsifying example: %s' % (example, ))
                    elif current_verbosity() >= Verbosity.verbose:
                        report(lambda: 'Trying example: %s(%s)' %
                               (test.__name__, arg_string(test, args, kwargs)))

                    if self.collector is None or not collect:
                        with deterministic_PRNG():
                            return test(*args, **kwargs)
                    else:  # pragma: no cover
                        try:
                            self.collector.start()
                            with deterministic_PRNG():
                                return test(*args, **kwargs)
                        finally:
                            self.collector.stop()

        result = self.test_runner(data, run)
        if expected_failure is not None:
            exception, traceback = expected_failure
            if (isinstance(exception, DeadlineExceeded)
                    and self.__test_runtime is not None):
                report(
                    ('Unreliable test timings! On an initial run, this '
                     'test took %.2fms, which exceeded the deadline of '
                     '%.2fms, but on a subsequent run it took %.2f ms, '
                     'which did not. If you expect this sort of '
                     'variability in your test timings, consider turning '
                     'deadlines off for this test by setting deadline=None.') %
                    (exception.runtime, self.settings.deadline,
                     self.__test_runtime))
            else:
                report(
                    'Failed to reproduce exception. Expected: \n' +
                    traceback, )
            self.__flaky(
                ('Hypothesis %s(%s) produces unreliable results: Falsified'
                 ' on the first call but did not on a subsequent one') % (
                     test.__name__,
                     text_repr[0],
                 ))
        return result

    def should_trace(self, original_filename, frame):  # pragma: no cover
        disp = FileDisposition()
        assert original_filename is not None
        disp.original_filename = original_filename
        disp.canonical_filename = encoded_filepath(
            canonical_filename(original_filename))
        disp.source_filename = disp.canonical_filename
        disp.reason = ''
        disp.file_tracer = None
        disp.has_dynamic_filename = False
        disp.trace = hypothesis_check_include(disp.canonical_filename)
        if not disp.trace:
            disp.reason = 'hypothesis internal reasons'
        elif self.__existing_collector is not None:
            check = self.__existing_collector.should_trace(
                original_filename, frame)
            if check.trace:
                self.files_to_propagate.add(check.canonical_filename)
        return disp

    def hijack_collector(self, collector):  # pragma: no cover
        self.__existing_collector = collector
        original_save_data = collector.save_data

        def save_data(covdata):
            original_save_data(covdata)
            if collector.branch:
                covdata.add_arcs({
                    filename: {
                        arc: None
                        for arc in self.coverage_data.arcs(filename) or ()
                    }
                    for filename in self.files_to_propagate
                })
            else:
                covdata.add_lines({
                    filename: {
                        line: None
                        for line in self.coverage_data.lines(filename) or ()
                    }
                    for filename in self.files_to_propagate
                })
            collector.save_data = original_save_data

        collector.save_data = save_data

    def evaluate_test_data(self, data):
        try:
            if self.collector is None:
                result = self.execute(data)
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.execute(data, collect=True)
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename))
            if result is not None:
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))

    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        if global_force_seed is None:
            database_key = str_to_bytes(fully_qualified_name(self.test))
        else:
            database_key = None
        self.start_time = time.time()
        global in_given
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )

        if in_given or self.collector is None:
            runner.run()
        else:  # pragma: no cover
            in_given = True
            original_trace = sys.gettrace()
            try:
                sys.settrace(None)
                runner.run()
            finally:
                in_given = False
                sys.settrace(original_trace)
                self.used_examples_from_database = \
                    runner.used_examples_from_database
        note_engine_for_statistics(runner)
        run_time = time.time() - self.start_time

        self.used_examples_from_database = runner.used_examples_from_database

        if runner.used_examples_from_database:
            if self.settings.derandomize:
                note_deprecation(
                    'In future derandomize will imply database=None, but your '
                    'test is currently using examples from the database. To '
                    'get the future behaviour, update your settings to '
                    'include database=None.')
            if self.__had_seed:
                note_deprecation(
                    'In future use of @seed will imply database=None in your '
                    'settings, but your test is currently using examples from '
                    'the database. To get the future behaviour, update your '
                    'settings for this test to include database=None.')

        timed_out = runner.exit_reason == ExitReason.timeout
        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True)
        else:
            if timed_out:
                note_deprecation((
                    'Your tests are hitting the settings timeout (%.2fs). '
                    'This functionality will go away in a future release '
                    'and you should not rely on it. Instead, try setting '
                    'max_examples to be some value lower than %d (the number '
                    'of examples your test successfully ran here). Or, if you '
                    'would prefer your tests to run to completion, regardless '
                    'of how long they take, you can set the timeout value to '
                    'hypothesis.unlimited.') % (self.settings.timeout,
                                                runner.valid_examples),
                                 self.settings)
            if runner.valid_examples == 0:
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for %s. Only found %d examples in %.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        'Unable to satisfy assumptions of hypothesis %s.' %
                        (get_pretty_function_description(self.test), ))

        if not self.falsifying_examples:
            return

        self.failed_normally = True

        flaky = 0

        for falsifying_example in self.falsifying_examples:
            ran_example = ConjectureData.for_buffer(falsifying_example.buffer)
            self.__was_flaky = False
            assert falsifying_example.__expected_exception is not None
            try:
                self.execute(ran_example,
                             print_example=True,
                             is_final=True,
                             expected_failure=(
                                 falsifying_example.__expected_exception,
                                 falsifying_example.__expected_traceback,
                             ))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.')
            except BaseException:
                if len(self.falsifying_examples) <= 1:
                    raise
                report(traceback.format_exc())
            finally:  # pragma: no cover
                # This section is in fact entirely covered by the tests in
                # test_reproduce_failure, but it seems to trigger a lovely set
                # of coverage bugs: The branches show up as uncovered (despite
                # definitely being covered - you can add an assert False else
                # branch to verify this and see it fail - and additionally the
                # second branch still complains about lack of coverage even if
                # you add a pragma: no cover to it!
                # See https://bitbucket.org/ned/coveragepy/issues/623/
                if self.settings.print_blob is not PrintSettings.NEVER:
                    failure_blob = encode_failure(falsifying_example.buffer)
                    # Have to use the example we actually ran, not the original
                    # falsifying example! Otherwise we won't catch problems
                    # where the repr of the generated example doesn't parse.
                    can_use_repr = ran_example.can_reproduce_example_from_repr
                    if (self.settings.print_blob is PrintSettings.ALWAYS or
                        (self.settings.print_blob is PrintSettings.INFER
                         and not can_use_repr and len(failure_blob) < 200)):
                        report((
                            '\n'
                            'You can reproduce this example by temporarily '
                            'adding @reproduce_failure(%r, %r) as a decorator '
                            'on your test case') % (
                                __version__,
                                failure_blob,
                            ))
            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ('Hypothesis found %d distinct failures, but %d of them '
                 'exhibited some sort of flaky behaviour.') %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(
                'Hypothesis found %d distinct failures.' %
                (len(self.falsifying_examples),))

    def __flaky(self, message):
        if len(self.falsifying_examples) <= 1:
            raise Flaky(message)
        else:
            self.__was_flaky = True
            report('Flaky example! ' + message)
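
For reference, the decorator suggested by the report above is part of Hypothesis's public API; applied to a test it looks roughly like this (the version string and blob below are placeholders, not real values - paste the ones the report prints):

from hypothesis import given, reproduce_failure, strategies as st


@reproduce_failure('3.44.1', b'AAAA')  # placeholders; use the reported values
@given(st.integers())
def test_something(x):
    assert x >= 0
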
Exemplo n.º 21
0
    def evaluate_test_data(self, data):
        if time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT:
            fail_health_check(
                self.settings,
                ('Your test has been running for at least five minutes. This '
                 'is probably not what you intended, so by default Hypothesis '
                 'turns it into an error.'), HealthCheck.hung_test)

        try:
            if self.collector is None:
                result = self.test_runner(
                    data, reify_and_execute(
                        self.search_strategy,
                        self.test,
                    ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    try:
                        self.collector.data = {}
                        self.collector.start()
                        result = self.test_runner(
                            data,
                            reify_and_execute(
                                self.search_strategy,
                                self.test,
                            ))
                    finally:
                        self.collector.stop()
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        for lineno in covdata.lines(filename):
                            data.add_tag(Line(filename, lineno))
                        for source, target in covdata.arcs(filename):
                            data.add_tag(Arc(filename, source, target))
            if result is not None and self.settings.perform_health_check:
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception:
            escalate_hypothesis_internal_error()
            data.__expected_exception = traceback.format_exc()
            verbose_report(data.__expected_exception)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))
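
For context, the Line and Arc tags attached to the test data above are simple value objects identifying a covered line or branch. A minimal sketch of plausible definitions (these namedtuple layouts are an assumption for illustration, not the exact Hypothesis internals):

from collections import namedtuple

# Hypothetical reconstructions; the real definitions live inside Hypothesis
# and may use different field names.
Line = namedtuple('Line', ('filename', 'lineno'))
Arc = namedtuple('Arc', ('filename', 'source', 'target'))
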
Exemplo n.º 22
0
def get_cov_data(input_file):
    data = CoverageData()
    data.read_file(input_file)
    return data
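
CoverageData.read_file is the coverage 4.x API; it was removed when coverage 5.0 moved to an SQLite-backed data file. A hedged equivalent for modern versions (assuming coverage >= 5):

from coverage import CoverageData


def get_cov_data_v5(input_file):
    # coverage >= 5: point the object at the data file, then read() it.
    data = CoverageData(basename=input_file)
    data.read()
    return data
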
Exemplo n.º 23
0
    def set_coverage(self, cov: coverage.CoverageData):
        for file in cov.measured_files():
            self.lines_covered[file] = frozenset(cov.lines(file))
            self.branches_covered[file] = frozenset(cov.arcs(file))
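
A sketch of how a caller might produce the coverage.CoverageData passed to set_coverage, using the public coverage API (the Report class and the measured work() function are stand-ins, not part of the snippet's project):

import coverage


class Report(object):
    # Stand-in owner for set_coverage, mirroring the method above.
    def __init__(self):
        self.lines_covered = {}
        self.branches_covered = {}

    def set_coverage(self, cov):
        for file in cov.measured_files():
            self.lines_covered[file] = frozenset(cov.lines(file))
            self.branches_covered[file] = frozenset(cov.arcs(file))


def work():  # any local code to measure
    return sum(range(10))


cov = coverage.Coverage(branch=True)  # branch=True so arcs() is populated
cov.start()
work()
cov.stop()

report = Report()
report.set_coverage(cov.get_data())  # Coverage.get_data() returns CoverageData
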
Exemplo n.º 24
0
    def __init__(self, test_runner, search_strategy, test, settings, random):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.at_least_one_success = False
        self.last_exception = None
        self.repr_for_last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__in_final_replay = False

        if self.settings.deadline is None:
            self.test = test
        else:
            @proxies(test)
            def timed_test(*args, **kwargs):
                self.__test_runtime = None
                start = time.time()
                result = test(*args, **kwargs)
                runtime = (time.time() - start) * 1000
                self.__test_runtime = runtime
                if self.settings.deadline is not_set:
                    if (
                        not self.__warned_deadline and
                        runtime >= 200
                    ):
                        self.__warned_deadline = True
                        note_deprecation((
                            'Test took %.2fms to run. In future the default '
                            'deadline setting will be 200ms, which will '
                            'make this an error. You can set deadline to '
                            'an explicit value of e.g. %d to turn tests '
                            'slower than this into an error, or you can set '
                            'it to None to disable this check entirely.') % (
                                runtime, ceil(runtime / 100) * 100,
                        ))
                elif runtime >= self.current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result
            self.test = timed_test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                self.hijack_collector(Collector._collectors[-1])

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None
Exemplo n.º 26
0
import os
import sys

from coverage import CoverageData

# import this so we get our monkey patching done which is needed for
# os.path.relpath on python2.5
import xtraceback


def transform_data(data, transform):
    result = dict()
    for path, value in data.items():
        result[transform(path)] = value
    return result


if __name__ == "__main__":

    transform = getattr(os.path, "%spath" % sys.argv[1])
    paths = sys.argv[2:]
    assert paths

    for path in paths:
        data = CoverageData(path)
        data.read()
        for field in ("lines", "arcs"):
            field_data = getattr(data, field)
            assert field_data
            setattr(data, field, transform_data(field_data, transform))
        data.write()
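
To make the script's effect concrete, the same transform can be applied directly with the helpers defined above (a sketch; the data-file name and the choice of relpath are assumptions, equivalent to invoking the script with 'rel' as its first argument, since "%spath" % 'rel' selects os.path.relpath):

import os

data = CoverageData('.coverage')  # coverage 3.x-style API, as in the script
data.read()
data.lines = transform_data(data.lines, os.path.relpath)
data.arcs = transform_data(data.arcs, os.path.relpath)
data.write()
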
Exemplo n.º 28
0
This coverage plug-in attempts to cover JIT'd functions and methods that were previously missed in code coverage. Any
function or method that was passed through or decorated with torch.jit.script or torch.jit.script_method should now be
marked as covered when coverage is run with this plug-in.

DISCLAIMER: note that this will mark the entire JIT'd function/method as covered without seeking proof that the
compiled code has been executed. This means that even if the code chunk is merely compiled and not run, it will be
marked as covered.
'''

from coverage import CoveragePlugin, CoverageData
from inspect import ismodule, isclass, ismethod, isfunction, iscode, getsourcefile, getsourcelines

# All coverage stats resulting from this plug-in will be in a separate .coverage file that should be merged later with
# `coverage combine`. The convention seems to be .coverage.dotted.suffix based on the following link:
# https://coverage.readthedocs.io/en/coverage-5.5/cmd.html#combining-data-files-coverage-combine
cov_data = CoverageData(basename='.coverage.jit')


def is_not_builtin_class(obj):
    return isclass(obj) and not type(obj).__module__ == 'builtins'


class JitPlugin(CoveragePlugin):
    '''
    dynamic_context is an overridden function that gives us access to every frame run during the coverage process. We
    look for when the function being run is `should_drop`, as all functions that get passed into `should_drop` will be
    compiled and thus should be marked as covered.
    '''
    def dynamic_context(self, frame):
        if frame.f_code.co_name == 'should_drop':
            obj = frame.f_locals['fn']
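
The snippet cuts off before the plug-in is wired in. With the standard coverage.py plug-in hooks it would be registered roughly as follows (a sketch assuming the usual coverage_init entry point, which coverage calls for each module listed under the [run] plugins setting):

def coverage_init(reg, options):
    # Registering as a dynamic-context plug-in is what routes every frame
    # through dynamic_context() above.
    reg.add_dynamic_context(JitPlugin())

The separate .coverage.jit file written by cov_data is then merged into the main data with "coverage combine", per the comment above.
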
Exemplo n.º 30
0
class StateForActualGivenExecution(object):
    def __init__(self, test_runner, search_strategy, test, settings, random):
        self.test_runner = test_runner
        self.search_strategy = search_strategy
        self.settings = settings
        self.at_least_one_success = False
        self.last_exception = None
        self.repr_for_last_exception = None
        self.falsifying_examples = ()
        self.__was_flaky = False
        self.random = random
        self.__warned_deadline = False
        self.__existing_collector = None
        self.__test_runtime = None
        self.__in_final_replay = False

        if self.settings.deadline is None:
            self.test = test
        else:

            @proxies(test)
            def timed_test(*args, **kwargs):
                self.__test_runtime = None
                start = time.time()
                result = test(*args, **kwargs)
                runtime = (time.time() - start) * 1000
                self.__test_runtime = runtime
                if self.settings.deadline is not_set:
                    if (not self.__warned_deadline and runtime >= 200):
                        self.__warned_deadline = True
                        note_deprecation(
                            ('Test took %.2fms to run. In future the default '
                             'deadline setting will be 200ms, which will '
                             'make this an error. You can set deadline to '
                             'an explicit value of e.g. %d to turn tests '
                             'slower than this into an error, or you can set '
                             'it to None to disable this check entirely.') % (
                                 runtime,
                                 ceil(runtime / 100) * 100,
                             ))
                elif runtime >= self.current_deadline:
                    raise DeadlineExceeded(runtime, self.settings.deadline)
                return result

            self.test = timed_test

        self.coverage_data = CoverageData()
        self.files_to_propagate = set()

        if settings.use_coverage and not IN_COVERAGE_TESTS:  # pragma: no cover
            if Collector._collectors:
                self.hijack_collector(Collector._collectors[-1])

            self.collector = Collector(
                branch=True,
                timid=FORCE_PURE_TRACER,
                should_trace=self.should_trace,
                check_include=hypothesis_check_include,
                concurrency='thread',
                warn=escalate_warning,
            )
            self.collector.reset()

            # Hide the other collectors from this one so it doesn't attempt to
            # pause them (we're doing trace function management ourselves so
            # this will just cause problems).
            self.collector._collectors = []
        else:
            self.collector = None

    @property
    def current_deadline(self):
        base = self.settings.deadline
        if self.__in_final_replay:
            return base
        else:
            return base * 1.25
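
In effect the deadline gets a 25% grace margin while examples are being generated (a 200ms deadline is enforced as 250ms), and only the final replay of a failing example is held to the exact configured value; this reduces spurious DeadlineExceeded errors from marginal timings during the search phase.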

    def should_trace(self, original_filename, frame):  # pragma: no cover
        disp = FileDisposition()
        assert original_filename is not None
        disp.original_filename = original_filename
        disp.canonical_filename = encoded_filepath(
            canonical_filename(original_filename))
        disp.source_filename = disp.canonical_filename
        disp.reason = ''
        disp.file_tracer = None
        disp.has_dynamic_filename = False
        disp.trace = hypothesis_check_include(disp.canonical_filename)
        if not disp.trace:
            disp.reason = 'hypothesis internal reasons'
        elif self.__existing_collector is not None:
            check = self.__existing_collector.should_trace(
                original_filename, frame)
            if check.trace:
                self.files_to_propagate.add(check.canonical_filename)
        return disp

    def hijack_collector(self, collector):  # pragma: no cover
        self.__existing_collector = collector
        original_save_data = collector.save_data

        def save_data(covdata):
            original_save_data(covdata)
            if collector.branch:
                covdata.add_arcs({
                    filename:
                    {arc: None
                     for arc in self.coverage_data.arcs(filename)}
                    for filename in self.files_to_propagate
                })
            else:
                covdata.add_lines({
                    filename: {
                        line: None
                        for line in self.coverage_data.lines(filename)
                    }
                    for filename in self.files_to_propagate
                })
            collector.save_data = original_save_data

        collector.save_data = save_data

    def evaluate_test_data(self, data):
        if time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT:
            fail_health_check(
                self.settings,
                ('Your test has been running for at least five minutes. This '
                 'is probably not what you intended, so by default Hypothesis '
                 'turns it into an error.'), HealthCheck.hung_test)

        try:
            if self.collector is None:
                result = self.test_runner(
                    data, reify_and_execute(
                        self.search_strategy,
                        self.test,
                    ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.test_runner(
                        data,
                        reify_and_execute(self.search_strategy,
                                          self.test,
                                          collector=self.collector))
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename))
            if result is not None and self.settings.perform_health_check:
                fail_health_check(
                    self.settings,
                    ('Tests run under @given should return None, but '
                     '%s returned %r instead.') % (self.test.__name__, result),
                    HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
                HypothesisDeprecationWarning,
                FailedHealthCheck,
                StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))

    def run(self):
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        database_key = str_to_bytes(fully_qualified_name(self.test))
        self.start_time = time.time()
        global in_given
        runner = ConjectureRunner(
            self.evaluate_test_data,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
        )

        if in_given or self.collector is None:
            runner.run()
        else:  # pragma: no cover
            in_given = True
            original_trace = sys.gettrace()
            try:
                sys.settrace(None)
                runner.run()
            finally:
                in_given = False
                sys.settrace(original_trace)
        note_engine_for_statistics(runner)
        run_time = time.time() - self.start_time
        timed_out = runner.exit_reason == ExitReason.timeout
        if runner.last_data is None:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                [d for d in runner.interesting_examples.values()],
                key=lambda d: sort_key(d.buffer),
                reverse=True)
        else:
            if timed_out:
                note_deprecation((
                    'Your tests are hitting the settings timeout (%.2fs). '
                    'This functionality will go away in a future release '
                    'and you should not rely on it. Instead, try setting '
                    'max_examples to be some value lower than %d (the number '
                    'of examples your test successfully ran here). Or, if you '
                    'would prefer your tests to run to completion, regardless '
                    'of how long they take, you can set the timeout value to '
                    'hypothesis.unlimited.') % (self.settings.timeout,
                                                runner.valid_examples),
                                 self.settings)
            if runner.valid_examples < min(
                    self.settings.min_satisfying_examples,
                    self.settings.max_examples,
            ) and not (runner.exit_reason == ExitReason.finished
                       and self.at_least_one_success):
                if timed_out:
                    raise Timeout(
                        ('Ran out of time before finding a satisfying '
                         'example for %s. Only found %d examples in %.2fs.') %
                        (get_pretty_function_description(
                            self.test), runner.valid_examples, run_time))
                else:
                    raise Unsatisfiable(
                        ('Unable to satisfy assumptions of hypothesis '
                         '%s. Only %d examples considered '
                         'satisfied assumptions') % (
                             get_pretty_function_description(self.test),
                             runner.valid_examples,
                         ))

        if not self.falsifying_examples:
            return

        flaky = 0

        self.__in_final_replay = True

        for falsifying_example in self.falsifying_examples:
            self.__was_flaky = False
            raised_exception = False
            try:
                with self.settings:
                    self.test_runner(
                        ConjectureData.for_buffer(falsifying_example.buffer),
                        reify_and_execute(self.search_strategy,
                                          self.test,
                                          print_example=True,
                                          is_final=True))
            except (UnsatisfiedAssumption, StopTest):
                report(traceback.format_exc())
                self.__flaky(
                    'Unreliable assumption: An example which satisfied '
                    'assumptions on the first run now fails it.')
            except BaseException:
                if len(self.falsifying_examples) <= 1:
                    raise
                raised_exception = True
                report(traceback.format_exc())

            if not raised_exception:
                if (isinstance(falsifying_example.__expected_exception,
                               DeadlineExceeded)
                        and self.__test_runtime is not None):
                    report((
                        'Unreliable test timings! On an initial run, this '
                        'test took %.2fms, which exceeded the deadline of '
                        '%.2fms, but on a subsequent run it took %.2fms, '
                        'which did not. If you expect this sort of '
                        'variability in your test timings, consider turning '
                        'deadlines off for this test by setting deadline=None.'
                    ) % (falsifying_example.__expected_exception.runtime,
                         self.settings.deadline, self.__test_runtime))
                else:
                    report(
                        'Failed to reproduce exception. Expected:\n' +
                        falsifying_example.__expected_traceback)

                filter_message = (
                    'Unreliable test data: Failed to reproduce a failure '
                    'and then when it came to recreating the example in '
                    'order to print the test data with a flaky result '
                    'the example was filtered out (by e.g. a '
                    'call to filter in your strategy) when we didn\'t '
                    'expect it to be.')

                try:
                    self.test_runner(
                        ConjectureData.for_buffer(falsifying_example.buffer),
                        reify_and_execute(self.search_strategy,
                                          test_is_flaky(
                                              self.test,
                                              self.repr_for_last_exception),
                                          print_example=True,
                                          is_final=True))
                except (UnsatisfiedAssumption, StopTest):
                    self.__flaky(filter_message)
                except Flaky as e:
                    if len(self.falsifying_examples) > 1:
                        self.__flaky(e.args[0])
                    else:
                        raise

            if self.__was_flaky:
                flaky += 1

        # If we only have one example then we should have raised an error or
        # flaky prior to this point.
        assert len(self.falsifying_examples) > 1

        if flaky > 0:
            raise Flaky(
                ('Hypothesis found %d distinct failures, but %d of them '
                 'exhibited some sort of flaky behaviour.') %
                (len(self.falsifying_examples), flaky))
        else:
            raise MultipleFailures(
                'Hypothesis found %d distinct failures.' %
                (len(self.falsifying_examples),))

    def __flaky(self, message):
        if len(self.falsifying_examples) <= 1:
            raise Flaky(message)
        else:
            self.__was_flaky = True
            report('Flaky example! ' + message)
Exemplo n.º 31
0
import io
import json
import lzma
import os
import tarfile
import tempfile

from coverage import CoverageData, coverage
from libcloud.storage.base import Container
from libcloud.storage.providers import get_driver
from libcloud.storage.types import Provider

# make_real_path is a project-local helper (not shown here) that maps the
# recorded paths onto files under the extracted temporary directory.


def process_revision(config, revision):
    storage = get_driver(Provider.CLOUDFILES)(
        config["RACKSPACE_USER"],
        config["RACKSPACE_APIKEY"],
        region=config["RACKSPACE_REGION"],
    )

    # Get our data files
    data = []
    for obj in storage.iterate_container_objects(
            Container(config["BUCKET"], None, storage),
            ex_prefix="data/{}".format(revision)):
        obj_data = b""
        for chunk in obj.as_stream():
            obj_data += chunk
        data.append(json.loads(lzma.decompress(obj_data).decode("utf8")))

    # Get our source files
    obj = storage.get_object(
        config["BUCKET"],
        "files/{revision}.tar.xz".format(revision=revision),
    )
    tarball = b""
    for chunk in obj.as_stream():
        tarball += chunk

    # Write out our source files into a temporary location
    with tempfile.TemporaryDirectory() as tmpdir:
        # Extract the tarball to our temporary directory
        with tarfile.open(fileobj=io.BytesIO(tarball), mode="r") as tb:
            tb.extractall(tmpdir)

        cdata = CoverageData()

        for datum in data:
            cdata.add_line_data({
                make_real_path(tmpdir, k): dict.fromkeys(v)
                for k, v in datum["data"].get("lines", {}).items()
            })
            cdata.add_arc_data({
                make_real_path(tmpdir, k): dict.fromkeys(map(tuple, v))
                for k, v in datum["data"].get("arcs", {}).items()
            })

        current_directory = os.path.abspath(".")
        try:
            os.chdir(tmpdir)

            cov = coverage()
            cov.data = cdata

            with tempfile.TemporaryDirectory() as htmldir:
                # Generate a HTML report for our data
                cov.html_report(directory=htmldir)

                # Upload the HTML report
                for dirname, dirnames, filenames in os.walk(htmldir):
                    for filename in filenames:
                        path = os.path.relpath(
                            os.path.join(dirname, filename),
                            htmldir,
                        )

                        storage.upload_object(
                            os.path.join(htmldir, path),
                            Container(config["BUCKET"], None, storage),
                            os.path.join("html", revision, path),
                        )
        finally:
            os.chdir(current_directory)
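
The add_line_data/add_arc_data calls above are the coverage 3.x API. On coverage 5 and later the usual approach is to merge CoverageData objects with update() and let coverage render the report itself; a hedged sketch (the file names are placeholders):

from coverage import Coverage, CoverageData

combined = CoverageData(basename='.coverage')
for path in ('.coverage.node1', '.coverage.node2'):  # placeholder inputs
    part = CoverageData(basename=path)
    part.read()
    combined.update(part)  # merges lines/arcs; aliases= can remap paths
combined.write()

cov = Coverage(data_file='.coverage')
cov.load()
cov.html_report(directory='htmlcov')
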
Exemplo n.º 32
0
def save_package_indexhtml(class_group, templates, output_dir, app_name,
                           granularity):
    folder = class_group[0].folder.replace('\\', '/')
    class_name_with_pkg = class_group[0].name
    package_name = Utils2.get_standart_package_name(class_name_with_pkg)
    init_table = templates['init_table.pt']
    init_row = templates['init_row.pt']
    index_template = templates['index.pt']

    slash_num = class_name_with_pkg.count('/')
    root_path = ''
    for i in range(slash_num):
        root_path += '../'
    total_coverage_data = CoverageData()
    rows = []
    for cl in class_group:
        elementlink = os.path.join(root_path, folder,
                                   cl.file_name + '.html').replace('\\', '/')
        elementname = cl.file_name
        coverage_data = CoverageData(lines=cl.coverable(),
                                     lines_missed=cl.not_covered(),
                                     lines_covered=cl.covered(),
                                     methods_covered=cl.mtds_covered(),
                                     methods_missed=cl.mtds_not_covered(),
                                     methods=cl.mtds_coverable())
        coverage_data.update_coverage_for_single_class_from_methods()
        coverage = coverage_data.get_coverage(granularity)
        row = init_row(
            elementlink=elementlink,
            type='class',
            elementname=elementname,
            coverage=coverage_data.format_coverage(coverage),
            respath=root_path,
            coverage_data=coverage_data,
            is_instruction_level=Granularity.is_instruction(granularity),
            progress_missed=coverage_data.missed(granularity),
            progress_covered=coverage_data.covered(granularity))
        rows.append(Markup(row))
        total_coverage_data.add_data(coverage_data)
    total_coverage = total_coverage_data.get_formatted_coverage(granularity)
    table = init_table(
        rows=Markup("\n".join(rows)),
        total_coverage=total_coverage,
        is_instruction_level=Granularity.is_instruction(granularity),
        total_coverage_data=total_coverage_data,
        progress_covered=total_coverage_data.covered(granularity),
        progress_all=total_coverage_data.coverable(granularity))
    html = index_template(table=Markup(table),
                          appname=app_name,
                          title=folder,
                          package=package_name,
                          respath=root_path,
                          file_name=None,
                          granularity=Granularity.get(granularity))
    rel_path = os.path.join(folder, 'index.html').replace('\\', '/')
    path = os.path.join(output_dir, rel_path).replace('\\', '/')
    with open(path, 'w') as f:
        f.write(html)
    return (package_name, rel_path, total_coverage_data)
Exemplo n.º 33
0
    def evaluate_test_data(self, data):
        if time.time() - self.start_time >= HUNG_TEST_TIME_LIMIT:
            fail_health_check(self.settings, (
                'Your test has been running for at least five minutes. This '
                'is probably not what you intended, so by default Hypothesis '
                'turns it into an error.'
            ), HealthCheck.hung_test)

        try:
            if self.collector is None:
                result = self.test_runner(data, reify_and_execute(
                    self.search_strategy, self.test,
                ))
            else:  # pragma: no cover
                # This should always be a no-op, but the coverage tracer has
                # a bad habit of resurrecting itself.
                original = sys.gettrace()
                sys.settrace(None)
                try:
                    self.collector.data = {}
                    result = self.test_runner(data, reify_and_execute(
                        self.search_strategy, self.test,
                        collector=self.collector
                    ))
                finally:
                    sys.settrace(original)
                    covdata = CoverageData()
                    self.collector.save_data(covdata)
                    self.coverage_data.update(covdata)
                    for filename in covdata.measured_files():
                        if is_hypothesis_file(filename):
                            continue
                        data.tags.update(
                            arc(filename, source, target)
                            for source, target in covdata.arcs(filename)
                        )
            if result is not None and self.settings.perform_health_check:
                fail_health_check(self.settings, (
                    'Tests run under @given should return None, but '
                    '%s returned %r instead.'
                ) % (self.test.__name__, result), HealthCheck.return_value)
            self.at_least_one_success = True
            return False
        except UnsatisfiedAssumption:
            data.mark_invalid()
        except (
            HypothesisDeprecationWarning, FailedHealthCheck,
            StopTest,
        ) + exceptions_to_reraise:
            raise
        except Exception as e:
            escalate_hypothesis_internal_error()
            data.__expected_traceback = traceback.format_exc()
            data.__expected_exception = e
            verbose_report(data.__expected_traceback)

            error_class, _, tb = sys.exc_info()

            origin = traceback.extract_tb(tb)[-1]
            filename = origin[0]
            lineno = origin[1]
            data.mark_interesting((error_class, filename, lineno))