async def execute_with_delay(self):
    target_at_enter = self.ts_target
    try:
        delay = target_at_enter - clock.now()
        if delay < self.debounce_delay:
            delay = self.debounce_delay
        logger.debug(f'execute_with_delay -> waiting for {round(delay, 4)} seconds')
        # Poll in small increments so a newly scheduled run can cancel us early.
        while clock.now() < target_at_enter:
            await sleep(0.01, loop=self._loop)
    except asyncio.CancelledError:
        logger.debug('execute_with_delay: cancelled to aggregate more tasks')
        return
    except Exception as e:
        logger.exception('execute_with_delay failed', exc_info=e)
        return

    # After the sleep, it may not be a good idea to cancel; use events?
    copy_of_list = list(self.dirty_tests)
    logger.info(
        f'execute_with_delay: total {len(copy_of_list)} tests will run with delay {round(delay, 4)} seconds',
    )
    execution_pipeline.add_task(
        # While this is running - append only, do not issue another task
        RunTestTask(copy_of_list, RemoteDebugParams.disabled()))
    self.dirty_tests = []
    self.run_pending = False
    self.reset_deadline()
    self.run_timer = None
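# Minimal, runnable illustration of the cancellation contract relied on
# above: cancelling the wrapping task raises asyncio.CancelledError inside
# the awaited sleep, which execute_with_delay interprets as "more tests
# arrived, aggregate and retry" rather than as a failure. This is a
# standalone sketch, not project code.
import asyncio

async def _demo_cancellation():
    task = asyncio.ensure_future(asyncio.sleep(10))
    task.cancel()
    try:
        await task
    except asyncio.CancelledError:
        print('cancelled, as execute_with_delay expects')

# asyncio.get_event_loop().run_until_complete(_demo_cancellation())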
async def run(self):
    await shared.pipe.push(
        event_type='file_modification',
        modified_file=self.file,
        ts=self.timestamp,
    )
    # todo:
    #   - look out for new tests in changed files
    #   - clean up zombie tests
    #   - run impacted tests and newly discovered ones
    discovery = SimpleTestDiscovery()
    old_map = test_map.get_immutable_tests_for_file(self.file)
    possibly_new_tests = discovery.find_tests_in_folder(
        state.engine.folder, search_only_in=[self.file])
    await state.engine.test_discovery_will_become_available(possibly_new_tests)
    new_map = test_map.get_immutable_tests_for_file(self.file)
    removed_tests = set()
    added_tests = set()
    for t in old_map:
        if t not in new_map:
            removed_tests.add(t)
    for t in new_map:
        if t not in old_map:
            added_tests.add(t)
    execution_plan = set()
    for new_test in possibly_new_tests.tests:
        if new_test.fqn in new_map:
            # todo: should depend on execution mode
            execution_plan.add(new_test.fqn)
    dependencies = combined_coverage.dependencies
    if dependencies:
        impacted_tests = dependencies[self.file]
        for fqn in impacted_tests:
            if fqn not in removed_tests:
                execution_plan.add(fqn)
            else:
                print(f"test {fqn} removed from execution plan")
    if state.config.engine_mode == 'manual':
        print("Manual mode, tests won't run. Consider switching engine mode to auto")
        return
    tests_to_run = state.engine.all_tests.collect_by_fqn(execution_plan)
    dirty_tests = self.consider_engine_mode(tests_to_run)
    execution_pipeline.add_task(RunTestTask(dirty_tests))
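# Shape of the reverse-coverage index consumed above, assuming
# combined_coverage.dependencies maps a source file to the FQNs of every
# test whose last run executed code in that file. Paths and FQNs below are
# illustrative only, not taken from the project:
dependencies = {
    'app/calculator.py': {
        'tests.test_calculator:test_add',
        'tests.test_calculator:test_divide',
    },
}
# On a change to app/calculator.py, these become the impacted tests:
impacted_tests = dependencies.get('app/calculator.py', set())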
async def run(self):
    if self.file.endswith(CONFIG_FILE_NAME):
        execution_pipeline.add_task(ConfigReloadTask())
        return

    # look out for new tests in changed files
    # clean up zombie tests
    # run impacted tests and newly discovered ones
    # todo: do not block the event loop!
    discovery = create_test_discovery()
    old_map = test_map.get_immutable_tests_for_file(self.file)
    possibly_new_tests = discovery.find_tests_in_folder(
        state.engine.folder, search_only_in=[self.file])
    await state.engine.test_discovery_will_become_available(possibly_new_tests)
    new_map = test_map.get_immutable_tests_for_file(self.file)
    removed_tests = set()
    added_tests = set()
    for t in old_map:
        if t not in new_map:
            removed_tests.add(t)
    for t in new_map:
        if t not in old_map:
            added_tests.add(t)
    execution_plan = set()
    for new_test in possibly_new_tests.tests:
        if new_test.fqn in new_map:
            execution_plan.add(new_test.fqn)
    dependencies = combined_coverage.dependencies
    if dependencies:
        impacted_tests = dependencies[self.file]
        for fqn in impacted_tests:
            if fqn not in removed_tests:
                execution_plan.add(fqn)
            else:
                print(f"test {fqn} removed from execution plan")
    if state.config.engine_mode == 'manual':
        print("Manual mode, tests won't run. Consider switching engine mode to auto")
        return
    tests_to_run = state.engine.all_tests.collect_by_fqn(execution_plan)
    dirty_tests = self.consider_engine_mode(tests_to_run)
    run_debouncer.add_tests(dirty_tests)
    await run_debouncer.schedule_run()
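# A minimal sketch of the run_debouncer contract used above, assuming a
# RunDebouncer whose add_tests()/schedule_run() keep pushing the deadline
# forward so rapid consecutive saves coalesce into a single test run.
# time.monotonic stands in for the project's clock.now, and the waiter is
# a stub of the full execute_with_delay shown earlier; the real class may
# differ.
import asyncio
import time

class RunDebouncer:
    def __init__(self, debounce_delay=0.5):
        self.debounce_delay = debounce_delay
        self.dirty_tests = []
        self.run_timer = None
        self.ts_target = 0.0

    def add_tests(self, tests):
        self.dirty_tests.extend(tests)

    async def schedule_run(self):
        # Move the deadline; the pending waiter polls until ts_target passes.
        self.ts_target = time.monotonic() + self.debounce_delay
        if self.run_timer is not None:
            # Cancellation is caught by the waiter and treated as
            # "aggregate more tests, then retry".
            self.run_timer.cancel()
        self.run_timer = asyncio.ensure_future(self.execute_with_delay())

    async def execute_with_delay(self):
        # Stub standing in for the full implementation shown above.
        while time.monotonic() < self.ts_target:
            await asyncio.sleep(0.01)
        batch, self.dirty_tests = self.dirty_tests, []
        print(f'running {len(batch)} tests')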
async def handle_my_custom_event(sid, json):
    logger.debug('received json (my event 2): ' + str(json))
    if 'action' not in json:
        logger.debug('no action specified')
        return
    action = json.get('action')
    if action == 'discovery':
        await engine.will_start_test_discovery()
    if action in ('run-tests', 'debug-tests'):
        if 'tests' not in json:
            logger.error('run-tests command received, but no tests specified')
            return
        logger.info('Running tests...')
        tests = json.get('tests')
        fqns = set()
        for test in tests:
            fqns.add(test['fqn'])
        tests_to_run = all_tests.collect_by_fqn(fqns)
        if action == 'debug-tests':
            debugger_port = json.get('debugger_port')
            debug_params = RemoteDebugParams(True, debugger_port)
        else:
            debug_params = RemoteDebugParams.disabled()
        execution_pipeline.add_task(RunTestTask(tests_to_run, debug_params))
    if action == 'load-file':
        if config.enable_web_ui:
            filename = json.get('filename')
            logger.debug('download_file ' + filename)
            # returns asynchronously via the pipeline
            execution_pipeline.add_task(DownloadFileTask(filename))
    if action == 'diagnostics':
        await engine.will_start_diagnostics_collection()
    if action == 'timings':
        await engine.will_send_timings()
    if action == 'pin-tests':
        # fqns: array of strings
        fqns = json.get('fqns')
        await engine.tests_will_pin(fqns)
    if action == 'unpin-tests':
        fqns = json.get('fqns')
        await engine.tests_will_unpin(fqns)
    if action == 'engine-mode':
        new_mode = json.get('mode')
        engine.engine_mode_will_change(new_mode)
    if action == 'watchdog-terminate':
        print('action == watchdog-terminate -> TerminateTestExecutionTask')
        watchdog_pipeline.add_task(TerminateTestExecutionTask())
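# A minimal sketch of what RemoteDebugParams likely carries, judging only
# from the two call sites above (RemoteDebugParams(True, debugger_port) and
# RemoteDebugParams.disabled()); the real class may hold more state.
class RemoteDebugParams:
    def __init__(self, enabled, port=None):
        self.enabled = enabled
        self.port = port

    @classmethod
    def disabled(cls):
        # Convenience constructor for the common non-debug case.
        return cls(False)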
async def handle_my_custom_event(sid, json):
    logger.debug('received json (my event 2): ' + str(json))
    if 'action' not in json:
        logger.debug('no action specified')
        return
    action = json.get('action')
    if action == 'discovery':
        await engine.will_start_test_discovery()
    if action == 'run-tests':
        if 'tests' not in json:
            logger.error('run-tests command received, but no tests specified')
            return
        logger.info('Running tests...')
        tests = json.get('tests')
        fqns = set()
        for test in tests:
            fqns.add(test['fqn'])
        tests_to_run = all_tests.collect_by_fqn(fqns)
        execution_pipeline.add_task(RunTestTask(tests_to_run))
    if action == 'load-file':
        filename = json.get('filename')
        logger.debug('download_file ' + filename)
        # returns asynchronously via the pipeline
        execution_pipeline.add_task(DownloadFileTask(filename))
    if action == 'diagnostics':
        await engine.will_start_diagnostics_collection()
    if action == 'timings':
        await engine.will_send_timings()
    if action == 'pin-tests':
        # fqns: array of strings
        fqns = json.get('fqns')
        await engine.tests_will_pin(fqns)
    if action == 'unpin-tests':
        fqns = json.get('fqns')
        await engine.tests_will_unpin(fqns)
    if action == 'engine-mode':
        new_mode = json.get('mode')
        engine.engine_mode_will_change(new_mode)
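# How a handler like the ones above is typically registered, assuming the
# python-socketio AsyncServer API. The event name 'my event 2' is inferred
# from the log messages and may not match the real project's wiring.
import socketio

sio = socketio.AsyncServer()

@sio.on('my event 2')
async def handle_my_custom_event(sid, json):
    # Dispatch on json['action'] as shown in the handlers above.
    ...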
async def thread_proc(self):
    from pycrunch.pipeline.file_modification_task import FileModifiedNotificationTask
    logger.debug('thread_proc')
    logger.debug(f'files {self.files}')
    path = Path('.').absolute()
    print('watching this:')
    print(path)
    async for changes in awatch(path, watcher_cls=PythonWatcher):
        for change_type, file in changes:
            # Newly added files are always forwarded, even if they are not
            # on the watch list yet.
            force = change_type == Change.added
            logger.info(f'File watcher alarm: file: `{file}` type `{change_type}`')
            if force or self.should_watch(file):
                if change_type == Change.deleted:
                    execution_pipeline.add_task(FileRemovedTask(file=file))
                    logger.info('Added file removal for pipeline ' + file)
                else:
                    execution_pipeline.add_task(FileModifiedNotificationTask(file=file))
                    logger.info('Added file modification for pipeline ' + file)
            else:
                logger.debug('non-significant file changed ' + file)
    logger.debug('END thread_proc')
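# A plausible PythonWatcher for the awatch(..., watcher_cls=PythonWatcher)
# call above, assuming the watchgod package (which provides awatch, Change,
# and DefaultWatcher). DefaultWatcher already skips editor temp files and
# VCS directories; this narrows the scan to .py sources. The project's
# actual watcher class may filter differently.
from watchgod import DefaultWatcher

class PythonWatcher(DefaultWatcher):
    def should_watch_file(self, entry):
        # entry is an os.DirEntry yielded during the directory scan.
        return entry.name.endswith('.py') and super().should_watch_file(entry)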