Code example #1
def test_double_nesting():
    sut = Timeline('test_double_nest')
    with use_clock_mock() as clock_mock:
        # 1 - root
        #   2 ... 4.5 nested_1
        #       3.1 ... 4.1 double_nested
        #  5 end root
        clock_mock.now.return_value = 1.0
        sut.start()
        clock_mock.now.return_value = 2.0
        sut.begin_nested_interval('nested_1')
        clock_mock.now.return_value = 3.1
        sut.begin_nested_interval('double_nested')
        clock_mock.now.return_value = 4.1

        sut.end_nested_interval()
        clock_mock.now.return_value = 4.5
        sut.end_nested_interval()

        clock_mock.now.return_value = 5
        sut.stop()
        sut.to_console()
        assert len(sut.root.intervals) == 1
        assert sut.root.duration() == 4
        nested_1 = sut.root.intervals[0]
        assert nested_1.duration() == 2.5
        double_nested = nested_1.intervals[0]
        assert double_nested.duration() == pytest.approx(1)

        print_duration(sut)
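
These tests depend on a use_clock_mock helper that never appears in this collection. Below is a minimal sketch of what it could be, assuming Timeline reads the current time through a module-level clock object with a now() method (the clock.now() call in code example #10 points that way); the patch target path is hypothetical.

from contextlib import contextmanager
from unittest.mock import patch


@contextmanager
def use_clock_mock():
    # Hypothetical patch target: point it at whichever module Timeline
    # actually imports its clock from. patch() substitutes a MagicMock,
    # so tests can pin clock_mock.now.return_value to exact timestamps.
    with patch('pycrunch.timeline.clock') as clock_mock:
        yield clock_mock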
Code example #2
def test_multiple_intervals():
    sut = Timeline('test_intervals')
    with use_clock_mock() as clock_mock:
        # 1 - root
        #   2 ... 3 nested_1
        #   3.1 ... 4.6 nested_2
        #  5 end root
        clock_mock.now.return_value = 1.0
        sut.start()
        clock_mock.now.return_value = 2.0
        sut.begin_nested_interval('nested_1')
        clock_mock.now.return_value = 3.0
        sut.end_nested_interval()
        clock_mock.now.return_value = 3.1
        sut.begin_nested_interval('nested_2')
        clock_mock.now.return_value = 4.6
        sut.end_nested_interval()

        clock_mock.now.return_value = 5
        sut.stop()

        assert len(sut.root.intervals) == 2
        assert sut.root.duration() == 4
        nested_interval_1 = sut.root.intervals[0]
        assert nested_interval_1.duration() == 1
        nested_interval_2 = sut.root.intervals[1]
        assert nested_interval_2.duration() == pytest.approx(1.5)
        print_duration(sut)
Code example #3
def test_timesheet_duration_single_interval():
    sut = Timeline('test 1')
    with use_clock_mock() as clock_mock:
        clock_mock.now.return_value = 1.0
        sut.start()
        clock_mock.now.return_value = 2.0
        sut.stop()
        assert sut.duration() == 1.0
        print_duration(sut)
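
print_duration is another helper the tests call but never define. Given that Timeline exposes duration(), it is plausibly a one-line debugging aid along these lines (the name attribute is an assumption):

def print_duration(timeline):
    # Hypothetical helper; the real one may format its output differently.
    print(f'{timeline.name}: {timeline.duration()} seconds')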
Code example #4
async def main():
    # import pydevd_pycharm
    #
    # pydevd_pycharm.settrace('localhost', port=21345, stdoutToServer=True, stderrToServer=True)

    timeline = Timeline('multiprocess run engine')
    timeline.start()
    timeline.mark_event('__main__')

    timeline.mark_event('ArgumentParser: Init')

    parser = argparse.ArgumentParser(description='PyCrunch CLI')

    parser.add_argument('--engine',
                        help='Engine used, one of [pytest, django, simple]')
    parser.add_argument('--port',
                        help='PyCrunch-Engine server port to connect')
    parser.add_argument(
        '--task-id',
        help='Id of task when multiple test runners ran at same time')
    parser.add_argument('--load-pytest-plugins',
                        help='If this is true, execution will be slower.')
    parser.add_argument(
        '--enable-remote-debug',
        action='store_true',
        help=
        'If this is true, remote debug will be enabled on a --remote-debugger-port'
    )
    parser.add_argument(
        '--remote-debugger-port',
        help=
        'If remote debug is enabled, this will specify a port used to connect to PyCharm pudb'
    )
    args = parser.parse_args()
    timeline.mark_event('ArgumentParser: parse_args completed')
    engine_to_use = args.engine
    if engine_to_use:
        from pycrunch.child_runtime.child_config import child_config
        child_config.use_engine(engine_to_use)
        # Guard: argparse leaves the flag as None when it is not supplied.
        if args.load_pytest_plugins and args.load_pytest_plugins.lower() == 'true':
            child_config.load_pytest_plugins = True
        if args.enable_remote_debug:
            child_config.enable_remote_debugging(args.remote_debugger_port)

    timeline.mark_event('Before run')

    await run(engine_to_use=engine_to_use,
              timeline=timeline,
              port=args.port,
              task_id=args.task_id)
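
child_config is imported but not shown here. A sketch of the singleton it could be, consistent with the three uses above (use_engine, the load_pytest_plugins flag, and enable_remote_debugging); everything beyond those members is an assumption.

class ChildConfig:
    # Hypothetical reconstruction; only the members used above are grounded.
    def __init__(self):
        self.runtime_engine = None
        self.load_pytest_plugins = False
        self.enable_remote_debug = False
        self.remote_debug_port = None

    def use_engine(self, new_engine):
        self.runtime_engine = new_engine

    def enable_remote_debugging(self, port):
        # argparse delivers the port as a string; normalize it once here.
        self.enable_remote_debug = True
        self.remote_debug_port = int(port) if port is not None else None


child_config = ChildConfig()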
Code example #5
def test_serialization():
    sut = Timeline('complex')
    # root
    #   nested_1
    #     a, b, c
    #   1
    #   2
    #     2.1
    #     2.2
    #       2.2.1
    #   event 'aaa'
    #   3
    sut.start()
    sut.begin_nested_interval('nested_1')

    sut.begin_nested_interval('a')
    sut.end_nested_interval()

    sut.begin_nested_interval('b')
    sut.end_nested_interval()

    sut.begin_nested_interval('c')
    sut.end_nested_interval()

    sut.end_nested_interval()

    sut.begin_nested_interval('1')
    sut.end_nested_interval()

    sut.begin_nested_interval('2')

    sut.begin_nested_interval('2.1')
    sut.end_nested_interval()

    sut.begin_nested_interval('2.2')

    sut.begin_nested_interval('2.2.1')
    sut.end_nested_interval()

    sut.end_nested_interval()

    sut.end_nested_interval()
    sut.mark_event('aaa')
    sut.begin_nested_interval('3')
    sut.end_nested_interval()

    sut.stop()

    execution_history.save(sut)
    pprint(execution_history.to_json())
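
execution_history and its to_json() output are not shown in this collection either. Based only on the attributes the tests read elsewhere (intervals, events, duration(), and event name/timestamp), a serializer could recurse over one interval like this; the key names are assumptions.

def interval_to_json(interval):
    # Recursive sketch mirroring the interval/event API the tests exercise;
    # key names and the name attribute are assumed, not confirmed.
    return {
        'name': getattr(interval, 'name', None),
        'duration': interval.duration(),
        'events': [dict(name=e.name, timestamp=e.timestamp)
                   for e in interval.events],
        'intervals': [interval_to_json(child) for child in interval.intervals],
    }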
Code example #6
def test_nested_interval():
    sut = Timeline('test_intervals')
    with use_clock_mock() as clock_mock:
        clock_mock.now.return_value = 1.0
        sut.start()
        clock_mock.now.return_value = 2.0
        sut.begin_nested_interval('nested')
        clock_mock.now.return_value = 12.0

        sut.end_nested_interval()
        clock_mock.now.return_value = 13.0

        sut.stop()
        assert len(sut.root.intervals) == 1
        assert sut.root.duration() == 12.0
        sub_interval = sut.root.intervals[0]
        assert sub_interval.duration() == 10.0
        print_duration(sut)
Code example #7
def test_complex_nesting():
    sut = Timeline('complex')
    # root
    #   nested_1
    #     a, b, c
    #   1
    #   2
    #     2.1
    #     2.2
    #       2.2.1
    #   3
    sut.start()

    sut.begin_nested_interval('nested_1')

    sut.begin_nested_interval('a')
    sut.end_nested_interval()

    sut.begin_nested_interval('b')
    sut.end_nested_interval()

    sut.begin_nested_interval('c')
    sut.end_nested_interval()

    sut.end_nested_interval()

    sut.begin_nested_interval('1')
    sut.end_nested_interval()

    sut.begin_nested_interval('2')

    sut.begin_nested_interval('2.1')
    sut.end_nested_interval()

    sut.begin_nested_interval('2.2')

    sut.begin_nested_interval('2.2.1')
    sut.end_nested_interval()

    sut.end_nested_interval()

    sut.end_nested_interval()

    sut.begin_nested_interval('3')
    sut.end_nested_interval()

    sut.stop()
    sut.to_console()
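
To make the nesting semantics concrete: below is a minimal Timeline skeleton that satisfies every call these tests make (start/stop, begin_nested_interval/end_nested_interval, mark_event, duration, to_console). It is a reading aid consistent with the assertions above, not PyCrunch's actual implementation; clock is the patchable time source assumed earlier.

import time


class _Clock:
    def now(self):
        return time.perf_counter()


clock = _Clock()  # tests swap this out via use_clock_mock()


class Event:
    def __init__(self, name, timestamp):
        self.name = name
        self.timestamp = timestamp


class Interval:
    def __init__(self, name, parent=None):
        self.name = name
        self.parent = parent
        self.begin = None
        self.end = None       # must be set before duration() is called
        self.events = []
        self.intervals = []

    def duration(self):
        return self.end - self.begin


class Timeline:
    def __init__(self, name):
        self.name = name
        self.root = Interval(name)
        self.current = self.root  # innermost open interval

    def start(self):
        self.root.begin = clock.now()

    def stop(self):
        # Close the root; nested intervals must already be closed.
        self.root.end = clock.now()

    def duration(self):
        return self.root.duration()

    def begin_nested_interval(self, name):
        # Push a child interval; mark_event() and further nesting target
        # the child until end_nested_interval() pops back out.
        child = Interval(name, parent=self.current)
        child.begin = clock.now()
        self.current.intervals.append(child)
        self.current = child

    def end_nested_interval(self):
        self.current.end = clock.now()
        self.current = self.current.parent

    def mark_event(self, name):
        self.current.events.append(Event(name, clock.now()))

    def to_console(self, interval=None, indent=0):
        # Indented dump of the interval tree.
        interval = interval if interval is not None else self.root
        print(' ' * indent + f'{interval.name}: {interval.duration()}')
        for child in interval.intervals:
            self.to_console(child, indent + 2)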
Code example #8
def test_marker_inside_interval():
    sut = Timeline('test 1')
    with use_clock_mock() as clock_mock:
        clock_mock.now.return_value = 1.0
        sut.start()
        clock_mock.now.return_value = 2.0
        sut.begin_nested_interval('nested')
        clock_mock.now.return_value = 3.0
        sut.mark_event('event_1')
        clock_mock.now.return_value = 4.0
        sut.end_nested_interval()
        sut.mark_event('outer')
        clock_mock.now.return_value = 5.0
        sut.stop()
        sut.to_console()

    assert len(sut.root.events) == 1
    assert sut.root.events[0].name == 'outer'
    assert sut.root.events[0].timestamp == 4.0
    assert sut.root.intervals[0].events[0].timestamp == 3.0
    assert sut.root.intervals[0].events[0].name == 'event_1'
Code example #9
async def main():
    # import pydevd_pycharm
    #
    # pydevd_pycharm.settrace('localhost', port=21345, stdoutToServer=True, stderrToServer=True)

    timeline = Timeline('multiprocess run engine')
    timeline.start()
    timeline.mark_event('__main__')

    timeline.mark_event('ArgumentParser: Init')

    parser = argparse.ArgumentParser(description='PyCrunch CLI')

    parser.add_argument('--engine',
                        help='Engine used, one of [pytest, django, simple]')
    parser.add_argument('--port',
                        help='PyCrunch-Engine server port to connect')
    parser.add_argument(
        '--task-id',
        help='Id of task when multiple test runners ran at same time')

    args = parser.parse_args()
    timeline.mark_event('ArgumentParser: parse_args completed')
    engine_to_use = args.engine
    if engine_to_use:
        from pycrunch.session import config

        config.runtime_engine_will_change(engine_to_use)
    timeline.mark_event('Before run')

    await run(engine_to_use=engine_to_use,
              timeline=timeline,
              port=args.port,
              task_id=args.task_id)
Code example #10
class RunTestTask(AbstractTask):
    def __init__(self, tests, remote_debug_params: RemoteDebugParams):
        self.remote_debug_params = remote_debug_params
        self.timestamp = clock.now()
        self.tests = tests
        self.results = None
        self.timeline = Timeline('run tests')
        self.timeline.start()

    async def run(self):
        """
            Here we run multiple tests at once using one or multiple processes
        """
        self.timeline.mark_event('run')
        watchdog_pipeline.add_task(TestExecutionBeginTask(len(self.tests)))
        socket_notification_task = asyncio.ensure_future(
            engine.tests_will_run(self.tests))

        converted_tests = self.get_converted_test_list()
        runner = self.create_test_runner()

        # while not cancelled
        runner_task = asyncio.ensure_future(runner.run(tests=converted_tests))
        run_results_compound = await self.wait_with_cancellation(runner_task)
        if run_results_compound.is_failed():
            failure_reason = self.user_friendly_error_message(
                run_results_compound.status)

            for _ in converted_tests:
                candidate_fqn = _['fqn']
                cov_run = CoverageRun(
                    candidate_fqn,
                    -1,
                    None,
                    execution_result=ExecutionResult.create_failed_with_reason(
                        failure_reason))
                run_results_compound.results[candidate_fqn] = cov_run

        run_results = run_results_compound.results

        self.timeline.mark_event('before tests_did_run')

        # Asynchronously send messages over the websocket.
        # The line below communicates test statuses as a side effect.
        async_tasks_post = [engine.tests_did_run(run_results)]

        self.post_process_combined_coverage(run_results)

        self.timeline.mark_event('Sending: test_run_completed event')
        # todo: I'm only using `filename` in connector, why bother with everything?
        cov_to_send = dict(all_runs=self.convert_result_to_json(run_results))
        async_tasks_post.append(
            shared.pipe.push(
                event_type='test_run_completed',
                coverage=cov_to_send,
                timings=dict(start=self.timestamp, end=clock.now()),
            ))

        self.timeline.mark_event('Started combined coverage serialization')
        serialized = serialize_combined_coverage(combined_coverage)
        self.timeline.mark_event('Completed combined coverage serialization')

        self.timeline.mark_event('Sending: combined coverage over WS')
        async_tasks_post.append(
            shared.pipe.push(
                event_type='combined_coverage_updated',
                combined_coverage=serialized,
                # Todo: why do I need dependencies to be exposed? It is internal state.
                # dependencies=self.build_dependencies(),
                aggregated_results=engine.all_tests.legacy_aggregated_statuses(),
                timings=dict(start=self.timestamp, end=clock.now()),
            ))

        self.timeline.mark_event(
            'Waiting until post-processing tasks are completed')
        await asyncio.gather(*async_tasks_post)
        watchdog_pipeline.add_task(TestExecutionEndTask())

        self.timeline.mark_event('Send: done, stopping timeline')

        self.timeline.stop()
        execution_history.save(self.timeline)

    def user_friendly_error_message(self, status: str):
        failure_reason = 'epic fail'
        if status == 'terminated':
            failure_reason = 'Test execution terminated by user.'
        if status == 'timeout':
            line1 = f'Timeout of {config.execution_timeout_in_seconds} seconds reached while waiting for test execution to complete.'
            line2 = f'Consider increasing it in .pycrunch-config.yaml, e.g.:'
            line3 = f'{os.linesep}engine:{os.linesep}    timeout: 999{os.linesep}'
            line4 = f'Setting it to zero will wait forever.{os.linesep}'
            line5 = 'https://pycrunch.com/docs/configuration-file'
            failure_reason = os.linesep.join(
                [line1, line2, line3, line4, line5])
        return failure_reason

    def convert_result_to_json(self, run_results):
        results_as_json = dict()
        for k, v in run_results.items():
            results_as_json[k] = v.as_json()
        return results_as_json

    def post_process_combined_coverage(self, run_results):
        if self.remote_debug_params.enabled:
            self.timeline.mark_event(
                'Postprocessing: combined coverage will not be recomputed.')
            return

        self.timeline.mark_event(
            'Postprocessing: combined coverage, line hits, dependency tree')
        combined_coverage.add_multiple_results(run_results)
        self.timeline.mark_event('Postprocessing: completed')

    def create_test_runner(self):
        self.timeline.mark_event('before running tests')

        runner = MultiprocessTestRunner(
            timeout=config.get_execution_timeout(),
            timeline=self.timeline,
            test_run_scheduler=TestRunScheduler(
                cpu_cores=config.cpu_cores,
                threshold=config.multiprocessing_threshold),
            remote_debug_params=self.remote_debug_params,
        )
        return runner

    async def wait_with_cancellation(
            self, runner_task: asyncio.Future) -> TestRunStatus:
        try:
            # Here we wait for the first event, which may be:
            # 1. Watchdog termination signal
            # 2. Test runner events
            #  2.1 Run to end
            #  2.2 Timeout during run
            waited = await asyncio.wait(
                [termination_event.wait(), runner_task],
                return_when=asyncio.FIRST_COMPLETED)
            if runner_task.done():
                return TestRunStatus('success', runner_task.result())
            if termination_event.is_set():
                logger.warning('Looks like task was cancelled by user...')
                runner_task.cancel()
                return TestRunStatus('terminated')
        except asyncio.TimeoutError:
            logger.warning('Timeout reached while waiting for tests...')
            return TestRunStatus('timeout')

    def get_converted_test_list(self):
        converted_tests = list()
        for test in self.tests:
            # todo: why does this line exist?
            converted_tests.append(
                dict(fqn=test.discovered_test.fqn,
                     filename=test.discovered_test.filename,
                     name=test.discovered_test.name,
                     module=test.discovered_test.module,
                     state='converted'))
        return converted_tests
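
TestRunStatus is constructed with one or two arguments in wait_with_cancellation and consumed through is_failed(), status, and results in run(). A minimal shape that fits all of those uses; the is_failed rule is an assumption.

class TestRunStatus:
    def __init__(self, status, results=None):
        self.status = status  # e.g. 'success', 'terminated', 'timeout'
        # run() writes into .results on failure, so default to an empty dict.
        self.results = results if results is not None else {}

    def is_failed(self):
        # Assumption: anything other than a successful run counts as failed.
        return self.status != 'success'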
Code example #11
class RunTestTask(AbstractTask):
    def __init__(self, tests):
        self.timestamp = shared.timestamp()
        self.tests = tests
        self.results = None
        self.timeline = Timeline('run tests')
        self.timeline.start()

    def results_available(self, results):
        print('results avail:')
        pprint(results)
        self.results = results

    async def run(self):
        self.timeline.mark_event('run')

        await engine.tests_will_run(self.tests)
        converted_tests = list()
        for test in self.tests:
            converted_tests.append(
                dict(fqn=test.discovered_test.fqn,
                     filename=test.discovered_test.filename,
                     name=test.discovered_test.name,
                     module=test.discovered_test.module,
                     state='converted'))

        runner = MultiprocessTestRunner(timeout=30, timeline=self.timeline)
        self.timeline.mark_event('before running tests')
        await runner.run(tests=converted_tests)
        self.results = runner.results
        # runner = TestRunner(runner_engine=runner_engine)
        # with ModuleCleanup() as cleanup:
        #     results = runner.run(self.tests)
        if self.results is not None:
            logger.debug('results are not none')
        if self.results is None:
            logger.error('!!! None in results')

        self.timeline.mark_event('before tests_did_run')
        if not self.results:
            self.results = dict()

        await engine.tests_did_run(self.results)

        self.timeline.mark_event(
            'Postprocessing: combined coverage, line hits, dependency tree')
        combined_coverage.add_multiple_results(self.results)
        self.timeline.mark_event('Postprocessing: completed')

        results_as_json = dict()
        for k, v in self.results.items():
            results_as_json[k] = v.as_json()

        self.timeline.mark_event('Sending: test_run_completed event')

        await shared.pipe.push(
            event_type='test_run_completed',
            coverage=dict(all_runs=results_as_json),
            # data=serialize_test_set_state(self.tests),
            timings=dict(start=self.timestamp, end=shared.timestamp()),
        )

        self.timeline.mark_event('Started combined coverage serialization')
        serialized = serialize_combined_coverage(combined_coverage)
        self.timeline.mark_event('Completed combined coverage serialization')

        self.timeline.mark_event('Send: combined coverage over WS')
        await shared.pipe.push(
            event_type='combined_coverage_updated',
            combined_coverage=serialized,
            dependencies={
                entry_point: list(filenames)
                for entry_point, filenames in
                combined_coverage.dependencies.items()
            },
            aggregated_results=engine.all_tests.legacy_aggregated_statuses(),
            timings=dict(start=self.timestamp, end=shared.timestamp()),
        )

        self.timeline.mark_event('Send: done, stopping timeline')

        self.timeline.stop()
        execution_history.save(self.timeline)
