def test_sample(self):
    """Smoke test: a CoverageRun built from sample metadata serializes via as_json()."""
    timeline = InsightTimeline(clock=Clock())
    result = ExecutionResult()
    result.run_did_succeed()
    result.state_timeline_did_become_available(timeline)
    metadata = self.sample_metadata()
    coverage_run = CoverageRun('test1', 1, metadata, result)
    # Dump the JSON payload for visual inspection in the test output.
    pprint.pprint(coverage_run.as_json())
def test_sample(self):
    """Smoke test: a CoverageRun built from hand-constructed TestMetadata serializes via as_json()."""
    # NOTE(review): a sibling test carries this same name; if both live in the
    # same class the later definition shadows the earlier one — confirm.
    metadata = TestMetadata('file1', 'test_a', 'module_a', 'module_a:test_a', 'queued')
    timeline = InsightTimeline(clock=Clock())
    outcome = ExecutionResult()
    outcome.run_did_succeed()
    outcome.state_timeline_did_become_available(timeline)
    coverage_run = CoverageRun('test1', 1, metadata, outcome)
    # NOTE(review): bare `pprint(...)` call — requires `from pprint import pprint`
    # at module level; a sibling test uses `pprint.pprint`. Verify the import style.
    pprint(coverage_run.as_json())
def test_unsupported_type_uuid_in_state_timeline_dict(self):
    """Values with no native JSON representation (UUIDs, classes, test instances)
    recorded on the timeline must still survive as_json() + json.dumps without raising.
    """
    state_timeline = InsightTimeline(clock=Clock())
    state_timeline.start()
    # UUIDs are not JSON-serializable out of the box.
    state_timeline.record(dict(some=uuid.uuid1()))
    nested_payload = dict(a=1, nested=dict(some=uuid.uuid1()))
    state_timeline.record(nested_payload)
    # Arbitrary class objects and instances are also unsupported types.
    state_timeline.record(TestCoverageRun)
    state_timeline.record(self)
    execution_result = ExecutionResult()
    execution_result.run_did_succeed()
    execution_result.state_timeline_did_become_available(state_timeline)
    # json.dumps raises here if any recorded value leaked through un-sanitized.
    serialized = json.dumps(state_timeline.as_json())
    print(serialized)
def run_test(self, test) -> ExecutionResult:
    """Run a single test through pytest with an interception plugin.

    :param test: object exposing `filename` and `name` attributes identifying the test
    :return: ExecutionResult marked as succeeded or failed (never raises)
    """
    execution_result = ExecutionResult()
    try:
        # pytest addresses individual tests as `path/to/file.py::test_name`.
        fqn_test_to_run = test.filename + '::' + test.name
        pprint(fqn_test_to_run)
        plugin = PyTestInterceptionPlugin(tests_to_run=[fqn_test_to_run])
        # -q: quiet output; -s: do not capture console logs
        pytest.main([fqn_test_to_run, '-qs'], plugins=[plugin])
        # pytest.main's return code is unreliable for a single test; the plugin
        # observed the run and reports pass/fail per test name.
        if plugin.guess_run_status(test.name):
            execution_result.run_did_succeed()
        else:
            execution_result.run_did_fail()
    except Exception as e:
        etype, value, current_traceback = sys.exc_info()
        execution_result.record_exception(
            etype=etype, value=value, current_traceback=current_traceback)
        execution_result.run_did_fail()
        # Negative limit prints only the last abs(limit) traceback entries —
        # here just the innermost frame.
        last_call = -1
        # Positional arguments: the `etype=`/`tb=` keywords were removed from
        # traceback.print_exception in Python 3.10; positionals work on all 3.x.
        traceback.print_exception(etype, value, current_traceback,
                                  limit=last_call, file=sys.stdout)
        logger.exception('Error while executing _run_test', exc_info=e)
    return execution_result
def run_test(self, test):
    """Import the test's file as a throwaway module and call the test function directly.

    :param test: object exposing `filename` (path to a .py file) and `name`
        (function name to invoke)
    :return: ExecutionResult marked succeeded on a clean call, failed on exception
    """
    execution_result = ExecutionResult()
    try:
        logger.debug('before _run_module...')
        # Load the file under a fake module name so it never pollutes sys.modules.
        spec = importlib.util.spec_from_file_location(
            "fake.name", test.filename)
        logger.debug(
            ' spec_from_file_location -> done; importing module...')
        module = importlib.util.module_from_spec(spec)
        logger.debug(' module_from_spec -> done; going to exec_module...')
        spec.loader.exec_module(module)
        method_to_call = getattr(module, test.name, None)
        if method_to_call:
            method_to_call()
            execution_result.run_did_succeed()
        # NOTE(review): when the named function is absent the result is neither
        # marked succeeded nor failed — confirm callers treat that as a failure.
    except Exception as e:
        etype, value, current_traceback = sys.exc_info()
        execution_result.record_exception(
            etype=etype, value=value, current_traceback=current_traceback)
        execution_result.run_did_fail()
        # Negative limit prints only the last abs(limit) traceback entries —
        # here just the innermost frame.
        last_call = -1
        # Positional arguments: the `etype=`/`tb=` keywords were removed from
        # traceback.print_exception in Python 3.10; positionals work on all 3.x.
        traceback.print_exception(etype, value, current_traceback,
                                  limit=last_call, file=sys.stdout)
        logger.exception('Error while executing _run_test', exc_info=e)
    return execution_result
def run_test(self, test):
    """Run one test through pytest with an interception plugin, optionally
    disabling pytest plugin autoload and attaching a PyCharm remote debugger.

    :type test: object
    :return val pycrunch.runner.execution_result.ExecutionResult
    """
    execution_result = ExecutionResult()
    try:
        # pytest addresses individual tests as `path/to/file.py::test_name`.
        fqn_test_to_run = test.filename + '::' + test.name
        plugin = PyTestInterceptionPlugin([fqn_test_to_run])
        # pytest.main(['tests_two.py::test_x', '-p', 'no:terminal'])
        # q - quite
        # s - do not capture console logs
        # l - show variable values in the current stack
        additional_pytest_args = ['-qs', '-l']
        plugins_arg = []
        if not self.child_config.load_pytest_plugins:
            # Env var must be set before pytest.main so setuptools-entry-point
            # plugins are not auto-loaded into the child process.
            os.environ['PYTEST_DISABLE_PLUGIN_AUTOLOAD'] = 'True'
            plugins_arg += ['-p', 'no:junitxml']
        # --trace-config
        # , '-p', 'no:helpconfig', - cannot be disabled
        all_args = additional_pytest_args + plugins_arg
        # print(all_args, file=sys.__stdout__)
        if self.child_config.enable_remote_debug:
            try:
                # Todo: this is too late to check for debugger existence.
                #   Need verify before `debug` button click
                import pydevd_pycharm
                # suspend=False: attach without pausing; stdout/stderr are
                # mirrored to the IDE-side debug server.
                pydevd_pycharm.settrace(
                    '127.0.0.1',
                    suspend=False,
                    port=self.child_config.remote_debug_port,
                    stdoutToServer=True,
                    stderrToServer=True)
            except ModuleNotFoundError as e:
                # sys.__stdout__ is used because pytest may have captured sys.stdout.
                print('---\nFailed to import pydevd_pycharm', file=sys.__stdout__)
                print(
                    ' Make sure you install pudb pycharm bindings by running:',
                    file=sys.__stdout__)
                print('pip install pydevd-pycharm\n---', file=sys.__stdout__)
                raise
        pytest.main([fqn_test_to_run] + all_args, plugins=[plugin])
        # pytest.main([fqn_test_to_run, '-qs'], plugins=[plugin])
        # The plugin observed the run and reports pass/fail per test name.
        if plugin.guess_run_status(test.name):
            execution_result.run_did_succeed()
        else:
            execution_result.run_did_fail()
        # x.stop()
        # logger.debug('after exec_module')
    except Exception as e:
        etype, value, current_traceback = sys.exc_info()
        execution_result.record_exception(
            etype=etype, value=value, current_traceback=current_traceback)
        execution_result.run_did_fail()
        # limit=-1 prints only the innermost traceback frame.
        last_call = -1
        # NOTE(review): `etype=`/`tb=` keywords were removed from
        # traceback.print_exception in Python 3.10 — confirm target version.
        traceback.print_exception(etype=etype, value=value,
                                  tb=current_traceback, limit=last_call,
                                  file=sys.stdout)
        # traceback.print_exc(file=sys.stdout)
        # print(str(e))
        logger.exception('Error while executing _run_test', exc_info=e)
    return execution_result