def startTest(self, test):
    """Record when *test* started and notify the PyDev server.

    Handles both regular nose test objects (which expose ``address()``)
    and the multiprocess case, where *test* is already a (file, test) pair.
    """
    test._pydev_start_time = time.time()
    if hasattr(test, 'address'):
        addr = test.address()
        filename, test_name = addr[0], addr[2]
    else:
        # multiprocess: test arrives as a (file, test) tuple
        filename, test_name = test
    pydev_runfiles_xml_rpc.notifyStartTest(filename, test_name)
def startTest(self, test):
    """Stamp the test's start time and report the test start to PyDev.

    Supports plain nose tests (via ``test.address()``) as well as the
    multiprocess protocol, where *test* is a (file, test) tuple.
    """
    test._pydev_start_time = time.time()
    if hasattr(test, 'address'):
        location = test.address()
        fname = location[0]
        tname = location[2]
    else:
        # multiprocess mode passes the (file, test) pair directly
        fname, tname = test
    pydev_runfiles_xml_rpc.notifyStartTest(fname, tname)
def startTest(self, test):
    """Begin a test: redirect stdout/stderr, reset per-test state, notify PyDev."""
    _PythonTextTestResult.startTest(self, test)
    # Capture both stdout and stderr, while still echoing to the originals.
    self.buf = pydevd_io.StartRedirect(keep_original_redirection=True, std='both')
    self.start_time = time.time()
    self._current_errors_stack = []
    self._current_failures_stack = []
    try:
        method_name = test._testMethodName
    except AttributeError:
        # jython 2.1: __testMethodName is name-mangled (pseudo-private in TestCase)
        method_name = test._TestCase__testMethodName
    test_name = test.__class__.__name__ + "." + method_name
    pydev_runfiles_xml_rpc.notifyStartTest(
        test.__pydev_pyfile__, test_name)
def pytest_runtestloop(self, session):
    """Run the collected pytest items, reporting each result to PyDev.

    Based on the default run-test loop (_pytest.session.pytest_runtestloop),
    but it times each test, reports the number of collected tests, and
    notifies the PyDev server as each test starts and finishes.
    """
    if self._using_xdist:
        # Yes, we don't have the hooks we'd need to show the results in the
        # pyunit view... Maybe the plugin maintainer may be able to provide
        # these additional hooks?
        return None

    # This mock makes all file representations print as PyDev expects, so
    # that hyperlinks are properly created in errors. Note: never unmocked!
    self._MockFileRepresentation()

    start_total = time.time()
    try:
        pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))
        if session.config.option.collectonly:
            return True

        for item in session.session.items:
            filename = item.fspath.strpath
            test = item.location[2]
            start = time.time()
            pydev_runfiles_xml_rpc.notifyStartTest(filename, test)

            # Don't use the pytest_runtest_protocol hook: we need the reports.
            reports = runner.runtestprotocol(item)
            delta = time.time() - start

            captured_output = ''
            error_contents = ''
            status = 'ok'
            for report in reports:
                if report.outcome not in ('passed', 'skipped'):
                    # Only passed/skipped/failed exist (no 'error'), so treat
                    # any failure outside the call phase as an error.
                    if report.when == 'setup':
                        if status == 'ok':
                            status = 'error'
                    elif report.when == 'teardown':
                        if status == 'ok':
                            status = 'error'
                    else:
                        # A failure in the call itself is a regular failure.
                        status = 'fail'

                    # longrepr may be a plain string/tuple instead of a repr
                    # object, so probe each attribute before using it.
                    if hasattr(report, 'longrepr') and report.longrepr:
                        longrepr = report.longrepr
                        if hasattr(longrepr, 'reprcrash'):
                            error_contents += str(longrepr.reprcrash)
                            error_contents += '\n'
                        if hasattr(longrepr, 'reprtraceback'):
                            error_contents += str(longrepr.reprtraceback)
                        if hasattr(longrepr, 'sections'):
                            for name, content, sep in longrepr.sections:
                                error_contents += sep * 40
                                error_contents += name
                                error_contents += sep * 40
                                error_contents += '\n'
                                error_contents += content
                                error_contents += '\n'

            self.reportCond(status, filename, test, captured_output, error_contents, delta)
            if session.shouldstop:
                raise session.Interrupted(session.shouldstop)
    finally:
        pydev_runfiles_xml_rpc.notifyTestRunFinished('Finished in: %.2f secs.' % (time.time() - start_total,))
    return True
def pytest_runtest_setup(item):
    """Record the start time and tell the PyDev server which test is starting."""
    State.start_test_time = time.time()
    pydev_runfiles_xml_rpc.notifyStartTest(item.fspath.strpath, item.location[2])
def pytest_runtestloop(self, session):
    """Run the collected pytest items, reporting each result to PyDev.

    Based on the default run-test loop (_pytest.session.pytest_runtestloop),
    but getting the times we need, reporting the number of tests found and
    notifying the PyDev server as each test is run.
    """
    if self._using_xdist:
        # Yes, we don't have the hooks we'd need to show the results in the
        # pyunit view... Maybe the plugin maintainer may be able to provide
        # these additional hooks?
        return None

    # This mock will make all file representations be printed as Pydev
    # expects, so that hyperlinks are properly created in errors.
    # Note that we don't unmock it!
    self._MockFileRepresentation()

    start_total = time.time()
    try:
        pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))
        if session.config.option.collectonly:
            return True

        for item in session.session.items:
            filename = item.fspath.strpath
            test = item.location[2]
            start = time.time()
            pydev_runfiles_xml_rpc.notifyStartTest(filename, test)

            # Don't use the pytest_runtest_protocol hook because we need the
            # actual reports.
            reports = runner.runtestprotocol(item)
            delta = time.time() - start

            captured_output = ''
            error_contents = ''
            status = 'ok'
            for r in reports:
                if r.outcome not in ('passed', 'skipped'):
                    # It has only passed, skipped and failed (no error), so,
                    # let's consider error if not on call.
                    if r.when == 'setup':
                        if status == 'ok':
                            status = 'error'
                    elif r.when == 'teardown':
                        if status == 'ok':
                            status = 'error'
                    else:
                        # Any error in the call (not in setup or teardown) is
                        # considered a regular failure.
                        status = 'fail'

                    # FIX: longrepr is not always a repr object -- for skips
                    # and collection errors it can be a plain string or tuple
                    # with no reprcrash/reprtraceback/sections attributes.
                    # Unconditional access raised AttributeError and aborted
                    # the whole run loop; guard each attribute instead.
                    if hasattr(r, 'longrepr') and r.longrepr:
                        rep = r.longrepr
                        if hasattr(rep, 'reprcrash'):
                            error_contents += str(rep.reprcrash)
                            error_contents += '\n'
                        if hasattr(rep, 'reprtraceback'):
                            error_contents += str(rep.reprtraceback)
                        if hasattr(rep, 'sections'):
                            for name, content, sep in rep.sections:
                                error_contents += sep * 40
                                error_contents += name
                                error_contents += sep * 40
                                error_contents += '\n'
                                error_contents += content
                                error_contents += '\n'

            self.reportCond(status, filename, test, captured_output, error_contents, delta)
            if session.shouldstop:
                raise session.Interrupted(session.shouldstop)
    finally:
        pydev_runfiles_xml_rpc.notifyTestRunFinished('Finished in: %.2f secs.' % (time.time() - start_total,))
    return True
def notifyStartTest(self, job_id, *args, **kwargs):
    """Relay a start-test notification to the PyDev XML-RPC server.

    *job_id* identifies the calling worker but is not forwarded; the
    remaining arguments are passed through unchanged. Always returns True
    (XML-RPC methods must return a marshallable value).
    """
    pydev_runfiles_xml_rpc.notifyStartTest(*args, **kwargs)
    return True