Example #1
    def run_test(self, test) -> ExecutionResult:
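        # Build a pytest node id for this test, run it through pytest with an
        # interception plugin, and record pass/fail on the ExecutionResult.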
        # import django
        # django.setup()
        # from django.apps import apps
        # apps.set_available_apps([])

        execution_result = ExecutionResult()
        try:
            fqn_test_to_run = test.filename + '::' + test.name
            pprint(fqn_test_to_run)
            plugin = PyTestInterceptionPlugin(tests_to_run=[fqn_test_to_run])

            # pytest.main(['tests_two.py::test_x', '-p', 'no:terminal'])
            # q - quiet
            # s - do not capture console logs
            pytest.main([fqn_test_to_run, '-qs'], plugins=[plugin])
            # pprint(dict(passed_tests=plugin.passed_tests))
            # pprint(dict(failed_tests=plugin.failed_tests))

            # print('testing output interception')
            # print(vars(foo))
            if plugin.guess_run_status(test.name):
                execution_result.run_did_succeed()
            else:
                execution_result.run_did_fail()

            # logger.debug('after exec_module')
        except Exception as e:
            etype, value, current_traceback = sys.exc_info()
            execution_result.record_exception(
                etype=etype, value=value, current_traceback=current_traceback)
            execution_result.run_did_fail()
            last_call = -1
            traceback.print_exception(etype,
                                      value,
                                      current_traceback,
                                      limit=last_call,
                                      file=sys.stdout)
            # traceback.print_exc(file=sys.stdout)
            # print(str(e))
            logger.exception('Error while executing _run_test', exc_info=e)

        return execution_result
Example #2
def test_pinned_test_should_change_status():
    sut = AllTests()
    fqn = 'module:test'
    state = TestState(TestMetadata('dummy', 'dummy', 'dummy', fqn, 'pending'),
                      ExecutionResult(), False)
    sut.test_discovered(fqn, state, False)
    single_test = sut.tests.get(fqn, None)
    assert single_test.pinned is False
    sut.pin_test(fqn)
    single_test = sut.tests.get(fqn, None)
    assert single_test.pinned is True
Example #3
    def test_sample(self):
        test_meta = self.sample_metadata()
        state_timeline = InsightTimeline(clock=Clock())

        execution_result = ExecutionResult()
        execution_result.run_did_succeed()
        execution_result.state_timeline_did_become_available(state_timeline)
        run = CoverageRun('test1', 1, test_meta, execution_result)
        pprint.pprint(run.as_json())
Example #4
    def test_sample(self):
        test_meta = TestMetadata('file1', 'test_a', 'module_a',
                                 'module_a:test_a', 'queued')
        state_timeline = InsightTimeline(clock=Clock())

        execution_result = ExecutionResult()
        execution_result.run_did_succeed()
        execution_result.state_timeline_did_become_available(state_timeline)
        run = CoverageRun('test1', 1, test_meta, execution_result)
        pprint(run.as_json())
Example #5
    def test_unsupported_type_uuid_in_state_timeline_dict(self):
        state_timeline = InsightTimeline(clock=Clock())
        state_timeline.start()

        state_timeline.record(dict(some=uuid.uuid1()))
        t = dict(a=1, nested=dict(some=uuid.uuid1()))
        x = pprint.pformat(t)

        state_timeline.record(t)
        state_timeline.record(TestCoverageRun)
        state_timeline.record(self)

        execution_result = ExecutionResult()
        execution_result.run_did_succeed()
        execution_result.state_timeline_did_become_available(state_timeline)
        x = json.dumps(state_timeline.as_json())
        print(x)
Example #6
    def run_test(self, test):
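        # Import the test's module straight from its file path, call the test
        # function by name, and record the outcome on an ExecutionResult.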
        execution_result = ExecutionResult()
        try:
            logger.debug('before _run_module...')
            spec = importlib.util.spec_from_file_location(
                "fake.name", test.filename)
            logger.debug(
                '  spec_from_file_location -> done; importing module...')

            foo = importlib.util.module_from_spec(spec)

            logger.debug('  module_from_spec -> done; going to exec_module...')
            # logger.warning(f'_run_test->vars {vars(foo)}')
            spec.loader.exec_module(foo)
            method_to_call = getattr(foo, test.name, None)
            # print(vars(foo))
            if method_to_call:
                method_to_call()
                execution_result.run_did_succeed()
            # logger.debug('after exec_module')
        except Exception as e:
            etype, value, current_traceback = sys.exc_info()
            execution_result.record_exception(
                etype=etype, value=value, current_traceback=current_traceback)
            execution_result.run_did_fail()
            last_call = -1
            traceback.print_exception(etype,
                                      value,
                                      current_traceback,
                                      limit=last_call,
                                      file=sys.stdout)
            # traceback.print_exc(file=sys.stdout)
            # print(str(e))
            logger.exception('Error while executing _run_test', exc_info=e)

        return execution_result
Example #7
    async def run(self):
        """
            Here we run multiple tests at once using one or multiple processes
        """
        self.timeline.mark_event('run')
        watchdog_pipeline.add_task(TestExecutionBeginTask(len(self.tests)))
        socket_notification_task = asyncio.ensure_future(
            engine.tests_will_run(self.tests))

        converted_tests = self.get_converted_test_list()
        runner = self.create_test_runner()

        # while not cancelled
        runner_task = asyncio.ensure_future(runner.run(tests=converted_tests))
        run_results_compound = await self.wait_with_cancellation(runner_task)
        if run_results_compound.is_failed():
            failure_reason = self.user_friendly_error_message(
                run_results_compound.status)

            for _ in converted_tests:
                candidate_fqn = _['fqn']
                cov_run = CoverageRun(
                    candidate_fqn,
                    -1,
                    None,
                    execution_result=ExecutionResult.create_failed_with_reason(
                        failure_reason))
                run_results_compound.results[candidate_fqn] = cov_run

        run_results = run_results_compound.results

        self.timeline.mark_event('before tests_did_run')

        # asynchronously send message over websocket
        # The line below communicates test statuses as a side effect
        async_tasks_post = [engine.tests_did_run(run_results)]

        self.post_process_combined_coverage(run_results)

        self.timeline.mark_event('Sending: test_run_completed event')
        # todo: I'm only using `filename` in the connector, why bother with everything?
        cov_to_send = dict(all_runs=self.convert_result_to_json(run_results))
        async_tasks_post.append(
            shared.pipe.push(
                event_type='test_run_completed',
                coverage=cov_to_send,
                timings=dict(start=self.timestamp, end=clock.now()),
            ))

        self.timeline.mark_event('Started combined coverage serialization')
        serialized = serialize_combined_coverage(combined_coverage)
        self.timeline.mark_event('Completed combined coverage serialization')

        self.timeline.mark_event('Sending: combined coverage over WS')
        async_tasks_post.append(
            shared.pipe.push(
                event_type='combined_coverage_updated',
                combined_coverage=serialized,
                # Todo: why do I need dependencies to be exposed? It is internal state.
                # dependencies=self.build_dependencies(),
                aggregated_results=engine.all_tests.legacy_aggregated_statuses(
                ),
                timings=dict(start=self.timestamp, end=clock.now()),
            ))

        self.timeline.mark_event(
            'Waiting until post-processing tasks are completed')
        await asyncio.gather(*async_tasks_post)
        watchdog_pipeline.add_task(TestExecutionEndTask())

        self.timeline.mark_event('Send: done, stopping timeline')

        self.timeline.stop()
        execution_history.save(self.timeline)
Example #8
    def run_test(self, test):
        """

        :type test: object
        :return val pycrunch.runner.execution_result.ExecutionResult
        """
        execution_result = ExecutionResult()
        try:
            fqn_test_to_run = test.filename + '::' + test.name

            plugin = PyTestInterceptionPlugin([fqn_test_to_run])

            # pytest.main(['tests_two.py::test_x', '-p', 'no:terminal'])
            # q - quiet
            # s - do not capture console logs
            # l - show local variable values in tracebacks
            additional_pytest_args = ['-qs', '-l']
            plugins_arg = []

            if not self.child_config.load_pytest_plugins:
                os.environ['PYTEST_DISABLE_PLUGIN_AUTOLOAD'] = 'True'
                plugins_arg += ['-p', 'no:junitxml']

            # --trace-config
            # , '-p', 'no:helpconfig', - cannot be disabled
            all_args = additional_pytest_args + plugins_arg
            # print(all_args, file=sys.__stdout__)
            if self.child_config.enable_remote_debug:
                try:
                    # Todo: this is too late to check for debugger existence.
                    #   Need to verify before the `debug` button is clicked
                    import pydevd_pycharm
                    pydevd_pycharm.settrace(
                        '127.0.0.1',
                        suspend=False,
                        port=self.child_config.remote_debug_port,
                        stdoutToServer=True,
                        stderrToServer=True)
                except ModuleNotFoundError as e:
                    print('---\nFailed to import pydevd_pycharm',
                          file=sys.__stdout__)
                    print(
                        '  Make sure you install the PyCharm debugger bindings by running:',
                        file=sys.__stdout__)
                    print('pip install pydevd-pycharm\n---',
                          file=sys.__stdout__)
                    raise
            pytest.main([fqn_test_to_run] + all_args, plugins=[plugin])

            # pytest.main([fqn_test_to_run, '-qs'], plugins=[plugin])

            if plugin.guess_run_status(test.name):
                execution_result.run_did_succeed()
            else:
                execution_result.run_did_fail()
            # x.stop()
            # logger.debug('after exec_module')
        except Exception as e:
            etype, value, current_traceback = sys.exc_info()
            execution_result.record_exception(
                etype=etype, value=value, current_traceback=current_traceback)
            execution_result.run_did_fail()
            last_call = -1
            traceback.print_exception(etype,
                                      value,
                                      current_traceback,
                                      limit=last_call,
                                      file=sys.stdout)
            # traceback.print_exc(file=sys.stdout)
            # print(str(e))
            logger.exception('Error while executing _run_test', exc_info=e)
        return execution_result
Example #9
    def run(self, tests):
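        # Run each test under optional coverage, capture its stdout and insight
        # timeline, and collect a serialized CoverageRun per test fqn.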
        self.timeline.mark_event('Run: inside run method')
        from pycrunch.introspection.clock import clock
        from pycrunch.runner.interception import capture_stdout
        from pycrunch.shared.primitives import TestMetadata
        self.timeline.mark_event('Run: inside run method - imports complete')

        results = dict()
        for test_to_run in tests:
            self.timeline.begin_nested_interval(
                f'Running test {test_to_run.get("fqn", "unknown")}')

            # record traced variables
            state_timeline = InsightTimeline(clock=clock)
            state_timeline.start()
            inject_timeline(state_timeline)

            metadata = TestMetadata(**test_to_run)
            try:
                # TODO: Check if starting coverage AFTER pytest.main,
                #    before entering test_method, improves time by orders of magnitude.
                #  ---
                #    Checked: there is a 2x improvement for small files (0.06 vs 0.10),
                #      but it is still as slow as before on 500+ tests in one file.
                should_disable_coverage = DISABLE_COVERAGE
                if self.child_config.enable_remote_debug:
                    should_disable_coverage = True
                cov = CoverageAbstraction(should_disable_coverage,
                                          self.coverage_exclusions,
                                          self.timeline)
                cov.start()

                with capture_stdout() as get_value:
                    time_start = clock.now()
                    self.timeline.mark_event('About to start test execution')
                    execution_result = self.runner_engine.run_test(metadata)
                    self.timeline.mark_event(
                        'Test execution complete, postprocessing results')
                    time_end = clock.now()
                    time_elapsed = time_end - time_start

                    cov.stop()

                    _now = datetime.datetime.now()

                    print(
                        f'{os.linesep}at {_now.strftime("%X.%f")[:-3]} {_now.strftime("%x")}'
                    )

                    captured_output = get_value()
                    self.timeline.mark_event('Received captured output')

                    execution_result.output_did_become_available(
                        captured_output)
                    execution_result.state_timeline_did_become_available(
                        state_timeline)

                    self.timeline.mark_event('Before coverage serialization')
                    coverage_for_run = self.serialize_test_run(
                        cov,
                        metadata.fqn,
                        time_elapsed,
                        test_metadata=test_to_run,
                        execution_result=execution_result)
                    self.timeline.mark_event('After coverage serialization')
            except Exception as e:
                # This is most likely an exception in the engine itself.
                self.timeline.mark_event('Test execution exception.')
                import sys
                tb = self.get_detailed_traceback(metadata.fqn)
                print(tb, file=sys.__stdout__)
                from pycrunch.api.serializers import CoverageRun
                result = ExecutionResult.create_failed_with_reason(tb)
                # inject a fake run so the entire pipeline does not crash
                coverage_for_run = CoverageRun(metadata.fqn,
                                               -1,
                                               test_to_run,
                                               execution_result=result)

                # logger.exception('error during run', exc_info=e)

            results[metadata.fqn] = coverage_for_run
            self.timeline.end_nested_interval()
        return results
Example #10
    def test_discovered(self, fqn, discovered_test, is_pinned):
        # todo preserve state
        self.tests[fqn] = TestState(discovered_test, ExecutionResult(), is_pinned)
        combined_coverage.test_did_removed(fqn)
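
Taken together, the examples above exercise the same ExecutionResult lifecycle: create a result, mark it as passed or failed, and optionally attach a recorded exception, captured output, or an insight timeline. The sketch below condenses that lifecycle into one place. It assumes pycrunch is installed; the import path follows the run_test docstring above, while `run_single_test` and `noisy_test` are hypothetical names used only for illustration.

import sys
import traceback

from pycrunch.runner.execution_result import ExecutionResult


def noisy_test():
    # Hypothetical test callable, used only for illustration.
    assert 1 + 1 == 2


def run_single_test(test_callable):
    # Mirror the pattern from the examples above: mark success,
    # or record the exception and mark failure.
    execution_result = ExecutionResult()
    try:
        test_callable()
        execution_result.run_did_succeed()
    except Exception:
        etype, value, current_traceback = sys.exc_info()
        execution_result.record_exception(
            etype=etype, value=value, current_traceback=current_traceback)
        execution_result.run_did_fail()
        traceback.print_exception(etype, value, current_traceback, file=sys.stdout)
    return execution_result


result = run_single_test(noisy_test)

# A pre-failed result, as used in Examples #7 and #9 when the runner itself breaks:
fallback = ExecutionResult.create_failed_with_reason('runner crashed before the test started')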