def test_sample(self):
    """Smoke test: a successful run with an insight timeline serializes to JSON."""
    metadata = self.sample_metadata()
    timeline = InsightTimeline(clock=Clock())
    result = ExecutionResult()
    result.run_did_succeed()
    result.state_timeline_did_become_available(timeline)
    coverage_run = CoverageRun('test1', 1, metadata, result)
    # Print instead of asserting: this test only verifies serialization does not raise.
    pprint.pprint(coverage_run.as_json())
def test_sample(self):
    """Smoke test: a manually-built TestMetadata run serializes to JSON without raising."""
    test_meta = TestMetadata('file1', 'test_a', 'module_a', 'module_a:test_a', 'queued')
    state_timeline = InsightTimeline(clock=Clock())
    execution_result = ExecutionResult()
    execution_result.run_did_succeed()
    execution_result.state_timeline_did_become_available(state_timeline)
    run = CoverageRun('test1', 1, test_meta, execution_result)
    # Fix: `pprint` is the module name under `import pprint`; calling it directly
    # raises TypeError. Use `pprint.pprint(...)`, consistent with the sibling test.
    pprint.pprint(run.as_json())
def serialize_test_run(self, cov, fqn, time_elapsed, test_metadata, execution_result):
    """
    Package one finished test execution into a serializable CoverageRun.

    :type cov: CoverageAbstraction
    :type fqn: str
    :type time_elapsed: float
    :param test_metadata: metadata describing the test that was executed
    :param execution_result: outcome (status, output, insight timeline) of the run
    :return: CoverageRun carrying the per-file hit lines collected by `cov`
    """
    hit_lines = cov.parse_all_hit_lines()
    coverage_run = CoverageRun(fqn, time_elapsed, test_metadata, execution_result)
    coverage_run.store_files_coverage(hit_lines)
    return coverage_run
async def run(self):
    """
    Here we run multiple tests at once using one or multiple processes.

    Flow: announce the batch, run all tests via the runner, patch in synthetic
    failure results if the whole run failed, then push results, combined
    coverage, and aggregated statuses over the websocket pipe.
    """
    self.timeline.mark_event('run')
    # Tell the watchdog how many tests are about to execute.
    watchdog_pipeline.add_task(TestExecutionBeginTask(len(self.tests)))
    # NOTE(review): this future is never awaited or gathered below — presumably
    # a deliberate fire-and-forget notification; confirm it cannot be lost.
    socket_notification_task = asyncio.ensure_future(
        engine.tests_will_run(self.tests))
    converted_tests = self.get_converted_test_list()
    runner = self.create_test_runner()
    # while not cancelled
    runner_task = asyncio.ensure_future(runner.run(tests=converted_tests))
    run_results_compound = await self.wait_with_cancellation(runner_task)
    if run_results_compound.is_failed():
        # Whole-run failure (e.g. runner crashed): synthesize a failed
        # CoverageRun for every test so downstream consumers still get a
        # result per fqn instead of missing entries.
        failure_reason = self.user_friendly_error_message(
            run_results_compound.status)
        for _ in converted_tests:
            candidate_fqn = _['fqn']
            cov_run = CoverageRun(
                candidate_fqn, -1, None,
                execution_result=ExecutionResult.create_failed_with_reason(
                    failure_reason))
            run_results_compound.results[candidate_fqn] = cov_run

    run_results = run_results_compound.results
    self.timeline.mark_event('before tests_did_run')
    # asynchronously send message over websocket
    # Line bellow communicates test statuses as a side effect
    async_tasks_post = [engine.tests_did_run(run_results)]
    self.post_process_combined_coverage(run_results)
    self.timeline.mark_event('Sending: test_run_completed event')
    # todo: i'm only using `filename` in connector, why bother with everything?
    cov_to_send = dict(all_runs=self.convert_result_to_json(run_results))
    async_tasks_post.append(
        shared.pipe.push(
            event_type='test_run_completed',
            coverage=cov_to_send,
            timings=dict(start=self.timestamp, end=clock.now()),
        ))
    self.timeline.mark_event('Started combined coverage serialization')
    serialized = serialize_combined_coverage(combined_coverage)
    self.timeline.mark_event('Completed combined coverage serialization')
    self.timeline.mark_event('Sending: combined coverage over WS')
    async_tasks_post.append(
        shared.pipe.push(
            event_type='combined_coverage_updated',
            combined_coverage=serialized,
            # Todo: why do I need dependencies to be exposed? It is internal state.
            # dependencies=self.build_dependencies(),
            aggregated_results=engine.all_tests.legacy_aggregated_statuses(
            ),
            timings=dict(start=self.timestamp, end=clock.now()),
        ))
    self.timeline.mark_event(
        'Waiting until post-processing tasks are completed')
    # Wait for all websocket pushes before declaring the run finished.
    await asyncio.gather(*async_tasks_post)
    watchdog_pipeline.add_task(TestExecutionEndTask())
    self.timeline.mark_event('Send: done, stopping timeline')
    self.timeline.stop()
    execution_history.save(self.timeline)
def run(self, tests):
    """
    Execute each test in `tests` sequentially, collecting coverage, captured
    stdout and an insight timeline per test.

    :param tests: iterable of dicts; each is unpacked into TestMetadata and is
        expected to carry at least an 'fqn' key.
    :return: dict mapping test fqn -> CoverageRun (a synthetic failed run is
        injected when the engine itself raises, so the pipeline never sees a
        missing entry).
    """
    self.timeline.mark_event('Run: inside run method')
    # Imports are deferred to keep subprocess startup cheap until a run begins
    # — presumably; confirm against process-spawn path.
    from pycrunch.introspection.clock import clock
    from pycrunch.runner.interception import capture_stdout
    from pycrunch.shared.primitives import TestMetadata
    self.timeline.mark_event('Run: inside run method - imports complete')
    results = dict()
    for test_to_run in tests:
        self.timeline.begin_nested_interval(
            f'Running test {test_to_run.get("fqn", "unknown")}')

        # record traced variables
        state_timeline = InsightTimeline(clock=clock)
        state_timeline.start()
        inject_timeline(state_timeline)

        metadata = TestMetadata(**test_to_run)
        try:
            # TODO: Check if starting coverage AFTER pytest.main,
            #   before test_method enter, improves time in magnitudes.
            # ---
            # checked, there are 2x improvement for small files (0.06 vs 0.10, but still
            # slow as before on 500+ tests in one file
            should_disable_coverage = DISABLE_COVERAGE
            # Coverage tracing interferes with remote debuggers, so it is
            # forced off when remote debugging is enabled.
            if self.child_config.enable_remote_debug:
                should_disable_coverage = True

            cov = CoverageAbstraction(should_disable_coverage,
                                      self.coverage_exclusions,
                                      self.timeline)
            cov.start()

            with capture_stdout() as get_value:
                time_start = clock.now()
                self.timeline.mark_event('About to start test execution')
                execution_result = self.runner_engine.run_test(metadata)
                self.timeline.mark_event(
                    'Test execution complete, postprocessing results')
                time_end = clock.now()
                time_elapsed = time_end - time_start
                cov.stop()

                # Timestamp marker goes into the captured stream so the UI can
                # show when the test finished.
                _now = datetime.datetime.now()
                print(
                    f'{os.linesep}at {_now.strftime("%X.%f")[:-3]} {_now.strftime("%x")}'
                )
                captured_output = get_value()

            self.timeline.mark_event('Received captured output')
            execution_result.output_did_become_available(captured_output)
            execution_result.state_timeline_did_become_available(
                state_timeline)
            self.timeline.mark_event('Before coverage serialization')
            coverage_for_run = self.serialize_test_run(
                cov, metadata.fqn, time_elapsed,
                test_metadata=test_to_run,
                execution_result=execution_result)
            self.timeline.mark_event('After coverage serialization')
        except Exception as e:
            # Here is most likely exception in the engine itself.
            self.timeline.mark_event('Test execution exception.')
            import sys
            tb = self.get_detailed_traceback(metadata.fqn)
            # Bypass the stdout capture so the traceback reaches the real console.
            print(tb, file=sys.__stdout__)
            from pycrunch.api.serializers import CoverageRun
            result = ExecutionResult.create_failed_with_reason(tb)
            # inject fake run to not crash entire pipeline
            coverage_for_run = CoverageRun(metadata.fqn, -1, test_to_run,
                                           execution_result=result)
            # logger.exception('error during run', exc_info=e)
        results[metadata.fqn] = coverage_for_run
        self.timeline.end_nested_interval()
    return results