async def execute_with_delay(self):
    target_at_enter = self.ts_target
    try:
        delay = target_at_enter - clock.now()
        if delay < self.debounce_delay:
            delay = self.debounce_delay
        logger.debug(f'execute_with_delay -> waiting for {round(delay, 4)} seconds')
        # Sleep in small increments; the deadline captured at entry is honoured here.
        # A later delay_a_little() cancels this coroutine rather than extending the wait.
        while clock.now() < target_at_enter:
            await sleep(0.01)
    except asyncio.CancelledError:
        logger.debug('execute_with_delay: cancelled to aggregate more tasks')
        return
    except Exception as e:
        logger.exception('execute_with_delay: unexpected failure', exc_info=e)
        return

    # After the sleep it may not be a good idea to cancel; use events?
    copy_of_list = list(self.dirty_tests)
    logger.info(
        f'execute_with_delay: {len(copy_of_list)} tests will run after a delay of {round(delay, 4)} seconds',
    )
    execution_pipeline.add_task(
        # While this is running - append only, do not issue another task.
        RunTestTask(copy_of_list, RemoteDebugParams.disabled()))
    self.dirty_tests = []
    self.run_pending = False
    self.reset_deadline()
    self.run_timer = None
def __init__(self, tests, remote_debug_params: RemoteDebugParams):
    self.remote_debug_params = remote_debug_params
    self.timestamp = clock.now()
    self.tests = tests
    self.results = None
    self.timeline = Timeline('run tests')
    self.timeline.start()
def delay_a_little(self):
    now = clock.now()
    inside_debounce_interval = self.ts_created < now < self.ts_target
    if inside_debounce_interval:
        self.ts_target += self.debounce_delay
        # Keep the target within a sane window: at most 1 second ahead, never in the past.
        if self.ts_target - now > 1.0:
            self.ts_target = now + 1.0
        if self.ts_target - now < 0:
            self.ts_target = now + 0.05
        logger.debug(
            f'delay_a_little: updated ts_target, to run in {round(self.ts_target - now, 4)} secs'
        )
    if self.run_pending and self.run_timer:
        logger.debug('run_pending, cancelling...')
        copied_ref = self.run_timer
        copied_ref.cancel()
        logger.debug('Cancelled pending run. Waiting for more tests to run at once.')
        self.run_timer = None
        self.force_schedule_run()
def reset_deadline(self, multiplier=1.0):
    now = clock.now()
    self.ts_created = now
    # Scale the debounce delay by the multiplier, but never wait more than 0.85 seconds.
    delay = self.debounce_delay * multiplier
    if delay > 0.85:
        delay = 0.85
    self.ts_target = now + delay  # (s)
def __init__(self, debounce_delay: float):
    # Pending run task; cancelled by delay_a_little when more tests arrive.
    self.run_timer = None
    now = clock.now()
    self.ts_created = now
    self.ts_target = now + debounce_delay
    self.run_pending = False
    self.dirty_tests = []
    self.debounce_delay = debounce_delay
    self._loop = get_event_loop()
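

# `force_schedule_run` is referenced in delay_a_little above but is not part of this excerpt.
# A minimal sketch of what it plausibly looks like, assuming it wraps execute_with_delay in an
# asyncio task and stores it in run_timer (both names appear in the methods above); this is an
# illustration, not the actual implementation:
def force_schedule_run(self):
    self.run_pending = True
    self.run_timer = asyncio.ensure_future(self.execute_with_delay())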
async def run(self):
    """
    Run multiple tests at once using one or multiple processes.
    """
    self.timeline.mark_event('run')
    watchdog_pipeline.add_task(TestExecutionBeginTask(len(self.tests)))
    socket_notification_task = asyncio.ensure_future(engine.tests_will_run(self.tests))

    converted_tests = self.get_converted_test_list()
    runner = self.create_test_runner()

    # while not cancelled
    runner_task = asyncio.ensure_future(runner.run(tests=converted_tests))
    run_results_compound = await self.wait_with_cancellation(runner_task)

    if run_results_compound.is_failed():
        failure_reason = self.user_friendly_error_message(run_results_compound.status)
        for converted_test in converted_tests:
            candidate_fqn = converted_test['fqn']
            cov_run = CoverageRun(
                candidate_fqn,
                -1,
                None,
                execution_result=ExecutionResult.create_failed_with_reason(failure_reason))
            run_results_compound.results[candidate_fqn] = cov_run

    run_results = run_results_compound.results
    self.timeline.mark_event('before tests_did_run')

    # Asynchronously send message over websocket.
    # The line below communicates test statuses as a side effect.
    async_tasks_post = [engine.tests_did_run(run_results)]

    self.post_process_combined_coverage(run_results)

    self.timeline.mark_event('Sending: test_run_completed event')
    # todo: only `filename` is used in the connector; why send everything else?
    cov_to_send = dict(all_runs=self.convert_result_to_json(run_results))
    async_tasks_post.append(
        shared.pipe.push(
            event_type='test_run_completed',
            coverage=cov_to_send,
            timings=dict(start=self.timestamp, end=clock.now()),
        ))

    self.timeline.mark_event('Started combined coverage serialization')
    serialized = serialize_combined_coverage(combined_coverage)
    self.timeline.mark_event('Completed combined coverage serialization')

    self.timeline.mark_event('Sending: combined coverage over WS')
    async_tasks_post.append(
        shared.pipe.push(
            event_type='combined_coverage_updated',
            combined_coverage=serialized,
            # Todo: why do dependencies need to be exposed? It is internal state.
            # dependencies=self.build_dependencies(),
            aggregated_results=engine.all_tests.legacy_aggregated_statuses(),
            timings=dict(start=self.timestamp, end=clock.now()),
        ))

    self.timeline.mark_event('Waiting until post-processing tasks are completed')
    await asyncio.gather(*async_tasks_post)
    watchdog_pipeline.add_task(TestExecutionEndTask())
    self.timeline.mark_event('Send: done, stopping timeline')

    self.timeline.stop()
    execution_history.save(self.timeline)
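

# For context, a sketch of how this task is enqueued, based on execute_with_delay above.
# `dirty_tests` stands in for whatever list of test dicts the caller has collected; the
# pipeline is assumed to await run() later.
task = RunTestTask(dirty_tests, RemoteDebugParams.disabled())
execution_pipeline.add_task(task)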
def run(self, tests):
    self.timeline.mark_event('Run: inside run method')
    from pycrunch.introspection.clock import clock
    from pycrunch.runner.interception import capture_stdout
    from pycrunch.shared.primitives import TestMetadata

    self.timeline.mark_event('Run: inside run method - imports complete')
    results = dict()
    for test_to_run in tests:
        self.timeline.begin_nested_interval(
            f'Running test {test_to_run.get("fqn", "unknown")}')

        # record traced variables
        state_timeline = InsightTimeline(clock=clock)
        state_timeline.start()
        inject_timeline(state_timeline)

        metadata = TestMetadata(**test_to_run)
        try:
            # TODO: Check if starting coverage AFTER pytest.main,
            #   before test_method enter, improves time in magnitudes.
            #   ---
            #   Checked: there is a 2x improvement for small files (0.06 vs 0.10),
            #   but it is still as slow as before on 500+ tests in one file.
            should_disable_coverage = DISABLE_COVERAGE
            if self.child_config.enable_remote_debug:
                should_disable_coverage = True

            cov = CoverageAbstraction(should_disable_coverage, self.coverage_exclusions, self.timeline)
            cov.start()

            with capture_stdout() as get_value:
                time_start = clock.now()
                self.timeline.mark_event('About to start test execution')
                execution_result = self.runner_engine.run_test(metadata)
                self.timeline.mark_event('Test execution complete, postprocessing results')
                time_end = clock.now()
                time_elapsed = time_end - time_start

                cov.stop()

                _now = datetime.datetime.now()
                print(f'{os.linesep}at {_now.strftime("%X.%f")[:-3]} {_now.strftime("%x")}')
                captured_output = get_value()
                self.timeline.mark_event('Received captured output')
                execution_result.output_did_become_available(captured_output)
                execution_result.state_timeline_did_become_available(state_timeline)

            self.timeline.mark_event('Before coverage serialization')
            coverage_for_run = self.serialize_test_run(
                cov,
                metadata.fqn,
                time_elapsed,
                test_metadata=test_to_run,
                execution_result=execution_result)
            self.timeline.mark_event('After coverage serialization')
        except Exception as e:
            # Most likely an exception in the engine itself.
            self.timeline.mark_event('Test execution exception.')
            import sys
            tb = self.get_detailed_traceback(metadata.fqn)
            print(tb, file=sys.__stdout__)
            from pycrunch.api.serializers import CoverageRun
            result = ExecutionResult.create_failed_with_reason(tb)
            # Inject a fake run so the entire pipeline does not crash.
            coverage_for_run = CoverageRun(metadata.fqn, -1, test_to_run, execution_result=result)
            # logger.exception('error during run', exc_info=e)

        results[metadata.fqn] = coverage_for_run
        self.timeline.end_nested_interval()

    return results
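

# The `capture_stdout` helper used above is not included in this excerpt. A generic equivalent
# built on the standard library (an assumption, not pycrunch's actual implementation) could look
# like this; calling the yielded function inside the block returns whatever was printed so far:
import contextlib
import io

@contextlib.contextmanager
def capture_stdout():
    buffer = io.StringIO()
    with contextlib.redirect_stdout(buffer):
        yield buffer.getvalue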
def start(self):
    self.relative_to = clock.now()
    self.root.start()
def __init__(self, name, relative_to):
    self.relative_to = relative_to
    self.timestamp = clock.now()
    self.name = name
def stop(self):
    self.stopped_at = clock.now()
def start(self):
    self.started_at = clock.now()
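

# Taken together, the Timeline/Event helpers above follow a start / mark / stop pattern.
# A minimal usage sketch based on the calls made in RunTestTask.run and the runner's run above
# (the test name is hypothetical, and begin_nested_interval is assumed to live on the same
# Timeline class used there):
timeline = Timeline('run tests')
timeline.start()
timeline.mark_event('before tests_did_run')
timeline.begin_nested_interval('Running test sample_module.test_example')
timeline.end_nested_interval()
timeline.stop()
execution_history.save(timeline)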