Ejemplo n.º 1
0
    def run(self, tests):
        """Run every test in *tests* under coverage with stdout captured.

        Returns a dict mapping each test's fqn to its serialized run result.
        A test whose execution raises inside the engine is skipped in the
        returned dict instead of crashing the whole batch.
        """
        self.timeline.mark_event('Run: inside run method')

        from pycrunch.api.serializers import serialize_test_run
        from pycrunch.api.shared import timestamp
        from pycrunch.runner.interception import capture_stdout
        from pycrunch.shared.models import TestMetadata
        self.timeline.mark_event('Run: inside run method - imports complete')

        results = dict()
        for test_to_run in tests:
            self.timeline.begin_nested_interval(f'Running test {test_to_run.get("fqn", "unknown")}')

            # record traced variables
            state_timeline = InsightTimeline(clock=clock)
            state_timeline.start()
            inject_timeline(state_timeline)

            # Resolve fqn up-front and use a sentinel for the result so the
            # bookkeeping below never reads variables that were left unbound
            # (or stale from a previous iteration) when execution raises.
            fqn = test_to_run.get('fqn', 'unknown')
            coverage_for_run = None

            cov = self.start_coverage()

            try:
                with capture_stdout() as get_value:
                    time_start = timestamp()
                    metadata = TestMetadata(**test_to_run)
                    self.timeline.mark_event('About to start test execution')
                    execution_result = self.runner_engine.run_test(metadata)
                    self.timeline.mark_event('Test execution complete, postprocessing results')
                    time_end = timestamp()
                    time_elapsed = time_end - time_start

                    if not DISABLE_COVERAGE:
                        cov.stop()

                    self.timeline.mark_event('Coverage stopped')
                    fqn = metadata.fqn
                    captured_output = get_value()
                    self.timeline.mark_event('Received captured output')

                    execution_result.output_did_become_available(captured_output)
                    execution_result.state_timeline_did_become_available(state_timeline)

                    self.timeline.mark_event('Before coverage serialization')
                    coverage_for_run = serialize_test_run(cov, fqn, time_elapsed, test_metadata=test_to_run, execution_result=execution_result)
                    self.timeline.mark_event('After coverage serialization')
            except Exception:
                # Most likely an exception in the engine itself; swallow it so
                # one broken test does not abort the entire run.
                self.timeline.mark_event('Test execution exception.')
                # logger.exception('error during run', exc_info=e)

            # Only record tests that actually produced a serialized result.
            if coverage_for_run is not None:
                results[fqn] = coverage_for_run
            self.timeline.end_nested_interval()
        return results
    def test_sample(self):
        """Smoke test: a successful run serializes to JSON without errors."""
        metadata = self.sample_metadata()

        result = ExecutionResult()
        result.run_did_succeed()
        result.state_timeline_did_become_available(InsightTimeline(clock=Clock()))

        coverage_run = CoverageRun('test1', 1, metadata, result)
        pprint.pprint(coverage_run.as_json())
    def test_sample(self):
        """Smoke test: explicitly constructed metadata serializes to JSON."""
        meta = TestMetadata('file1', 'test_a', 'module_a',
                            'module_a:test_a', 'queued')

        result = ExecutionResult()
        result.run_did_succeed()
        result.state_timeline_did_become_available(InsightTimeline(clock=Clock()))

        coverage_run = CoverageRun('test1', 1, meta, result)
        pprint(coverage_run.as_json())
Ejemplo n.º 4
0
def test_timeline_one_variable():
    """A keyword argument passed to record() is stored with its name and value."""
    # NOTE: the original declared a local `variable_name = 42` that was never
    # used — record() below takes a keyword literal, not the local. Removed.
    timeline = InsightTimeline(clock)
    timeline.start()

    timeline.record(variable_name=42)

    first = timeline.variables[0]
    assert 'variable_name' == first.name
    assert 42 == first.value
Ejemplo n.º 5
0
def test_timeline_positional_arguments_only():
    """Positional values recorded without names are auto-named '1', '2', ..."""
    recorded_value = 42
    timeline = InsightTimeline(clock)
    timeline.start()
    timeline.record(recorded_value, 777)

    entries = timeline.variables
    assert entries[0].name == '1'
    assert entries[0].value == 42

    assert entries[1].name == '2'
    assert entries[1].value == 777
Ejemplo n.º 6
0
def test_timeline_two_variables():
    """Two keyword arguments are recorded in call order with their names."""
    expected_number = 42
    expected_person = 'Bret Victor'
    timeline = InsightTimeline(clock)
    timeline.start()
    timeline.record(variable_a=expected_number, brightest_mind=expected_person)

    entries = timeline.variables
    assert entries[0].name == 'variable_a'
    assert entries[1].name == 'brightest_mind'
    assert entries[0].value == 42
    assert entries[1].value == expected_person
Ejemplo n.º 7
0
def test_should_adjust_to_clock_start():
    """Recorded timestamps are relative to the moment start() was called."""
    clock = MagicMock()
    clock.now = MagicMock(side_effect=(100, 101, 110))

    # Wall-clock ticks: 100 (start) -- 101 ----------------- 110
    # Timeline-relative timestamps should therefore be: 1 and 10.
    timeline = InsightTimeline(clock)
    timeline.start()
    timeline.record(x=1)
    timeline.record(y=2)

    entries = timeline.variables
    assert entries[0].timestamp == 1
    assert entries[1].timestamp == 10
    def test_unsupported_type_uuid_in_state_timeline_dict(self):
        """JSON serialization must tolerate non-JSON-native recorded values
        (UUIDs, nested dicts, classes, instances)."""
        timeline = InsightTimeline(clock=Clock())
        timeline.start()

        timeline.record(dict(some=uuid.uuid1()))
        payload = dict(a=1, nested=dict(some=uuid.uuid1()))
        formatted = pprint.pformat(payload)

        timeline.record(payload)
        timeline.record(TestCoverageRun)
        timeline.record(self)

        result = ExecutionResult()
        result.run_did_succeed()
        result.state_timeline_did_become_available(timeline)
        serialized = json.dumps(timeline.as_json())
        print(serialized)
Ejemplo n.º 9
0
    def run(self, tests):
        """Execute each test in *tests* and return {fqn: serialized run}.

        Per test: builds a fresh InsightTimeline for traced variables,
        starts coverage (unless disabled or remote-debugging), runs the
        test with stdout captured, then serializes coverage + output.
        An engine-level exception is converted into a fake failed
        CoverageRun so one broken test does not abort the pipeline.
        """
        self.timeline.mark_event('Run: inside run method')
        from pycrunch.introspection.clock import clock
        from pycrunch.runner.interception import capture_stdout
        from pycrunch.shared.primitives import TestMetadata
        self.timeline.mark_event('Run: inside run method - imports complete')

        results = dict()
        for test_to_run in tests:
            self.timeline.begin_nested_interval(
                f'Running test {test_to_run.get("fqn", "unknown")}')

            # record traced variables
            state_timeline = InsightTimeline(clock=clock)
            state_timeline.start()
            inject_timeline(state_timeline)

            # Built outside the try-block so metadata.fqn is available to
            # the except-path fallback below.
            metadata = TestMetadata(**test_to_run)
            try:
                # TODO: Check if starting coverage AFTER pytest.main,
                #    before test_method enter, improves time in magnitudes.
                #  ---
                #    checked, there are 2x improvement for small files (0.06 vs 0.10, but still
                #      slow as before on 500+ tests in one file
                # Remote debugging and coverage tracing conflict, so coverage
                # is force-disabled when the debugger is attached.
                should_disable_coverage = DISABLE_COVERAGE
                if self.child_config.enable_remote_debug:
                    should_disable_coverage = True
                cov = CoverageAbstraction(should_disable_coverage,
                                          self.coverage_exclusions,
                                          self.timeline)
                cov.start()

                with capture_stdout() as get_value:
                    time_start = clock.now()
                    self.timeline.mark_event('About to start test execution')
                    execution_result = self.runner_engine.run_test(metadata)
                    self.timeline.mark_event(
                        'Test execution complete, postprocessing results')
                    time_end = clock.now()
                    time_elapsed = time_end - time_start

                    cov.stop()

                    # Emit a timestamp line into the captured output so the
                    # run's wall-clock moment is visible alongside it.
                    _now = datetime.datetime.now()

                    print(
                        f'{os.linesep}at {_now.strftime("%X.%f")[:-3]} {_now.strftime("%x")}'
                    )

                    captured_output = get_value()
                    self.timeline.mark_event('Received captured output')

                    execution_result.output_did_become_available(
                        captured_output)
                    execution_result.state_timeline_did_become_available(
                        state_timeline)

                    self.timeline.mark_event('Before coverage serialization')
                    coverage_for_run = self.serialize_test_run(
                        cov,
                        metadata.fqn,
                        time_elapsed,
                        test_metadata=test_to_run,
                        execution_result=execution_result)
                    self.timeline.mark_event('After coverage serialization')
            except Exception as e:
                # Here is most likely exception in the engine itself.
                self.timeline.mark_event('Test execution exception.')
                import sys
                # Print the traceback to the real stdout (outside any
                # capture) so it reaches the console/log.
                tb = self.get_detailed_traceback(metadata.fqn)
                print(tb, file=sys.__stdout__)
                from pycrunch.api.serializers import CoverageRun
                result = ExecutionResult.create_failed_with_reason(tb)
                # inject fake run to not crash entire pipeline
                coverage_for_run = CoverageRun(metadata.fqn,
                                               -1,
                                               test_to_run,
                                               execution_result=result)

                # logger.exception('error during run', exc_info=e)

            results[metadata.fqn] = coverage_for_run
            self.timeline.end_nested_interval()
        return results