Code Example #1
    def run_tests(self, handle_coverage=True):
        """ runs all tests """
        sys.stdout.write("Finding files... ")
        files = self.find_import_files()
        if self.verbosity > 3:
            sys.stdout.write('%s ... done.\n' % (self.files_or_dirs))
        else:
            sys.stdout.write('done.\n')
        sys.stdout.write("Importing test modules ... ")

        if handle_coverage:
            coverage_files, coverage = StartCoverageSupport(self.configuration)
        else:
            #Keep the names bound so the parallel branch below can still reference coverage_files.
            coverage_files = coverage = None

        file_and_modules_and_module_name = self.find_modules_from_files(files)
        sys.stdout.write("done.\n")

        all_tests = self.find_tests_from_modules(
            file_and_modules_and_module_name)
        all_tests = self.filter_tests(all_tests)

        import pydev_runfiles_unittest
        test_suite = pydev_runfiles_unittest.PydevTestSuite(all_tests)
        import pydev_runfiles_xml_rpc
        pydev_runfiles_xml_rpc.notifyTestsCollected(
            test_suite.countTestCases())

        start_time = time.time()

        def run_tests():
            executed_in_parallel = False
            if self.jobs > 1:
                import pydev_runfiles_parallel

                #What may happen is that the number of jobs needed is lower than the number of jobs requested
                #(e.g.: 2 jobs were requested for running 1 test) -- in which case ExecuteTestsInParallel will
                #return False and won't run any tests.
                executed_in_parallel = pydev_runfiles_parallel.ExecuteTestsInParallel(
                    all_tests, self.jobs, self.split_jobs, self.verbosity,
                    coverage_files, self.configuration.coverage_include)

            if not executed_in_parallel:
                #If in coverage, we don't need to pass anything here (coverage is already enabled for this execution).
                runner = pydev_runfiles_unittest.PydevTextTestRunner(
                    stream=sys.stdout,
                    descriptions=1,
                    verbosity=self.verbosity)
                sys.stdout.write('\n')
                runner.run(test_suite)

        if self.configuration.django:
            MyDjangoTestSuiteRunner(run_tests).run_tests([])
        else:
            run_tests()

        if handle_coverage:
            coverage.stop()
            coverage.save()

        total_time = 'Finished in: %.2f secs.' % (time.time() - start_time, )
        pydev_runfiles_xml_rpc.notifyTestRunFinished(total_time)
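Examples #1 and #2 share the same XML-RPC notification lifecycle: announce the number of collected tests, run them, and always report the total time. A minimal sketch of just that skeleton, assuming pydev_runfiles_xml_rpc is importable and already connected to the IDE (report_run and run_suite are hypothetical stand-ins, not PyDev names):

    import time

    import pydev_runfiles_xml_rpc

    def report_run(test_suite, run_suite):
        #Tell the IDE how many tests will run, then time the whole run.
        pydev_runfiles_xml_rpc.notifyTestsCollected(test_suite.countTestCases())
        start_time = time.time()
        try:
            run_suite(test_suite)
        finally:
            #Always report the elapsed time, even if the run raises.
            total_time = 'Finished in: %.2f secs.' % (time.time() - start_time,)
            pydev_runfiles_xml_rpc.notifyTestRunFinished(total_time)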
Code Example #2
    def run_tests(self, handle_coverage=True):
        """ runs all tests """
        sys.stdout.write("Finding files... ")
        files = self.find_import_files()
        if self.verbosity > 3:
            sys.stdout.write('%s ... done.\n' % (self.files_or_dirs))
        else:
            sys.stdout.write('done.\n')
        sys.stdout.write("Importing test modules ... ")
        

        if handle_coverage:
            coverage_files, coverage = StartCoverageSupport(self.configuration)
        else:
            #Keep the names bound so the parallel branch below can still reference coverage_files.
            coverage_files = coverage = None
        
        file_and_modules_and_module_name = self.find_modules_from_files(files)
        sys.stdout.write("done.\n")
        
        all_tests = self.find_tests_from_modules(file_and_modules_and_module_name)
        if self.test_filter or self.tests:

            if self.test_filter:
                sys.stdout.write('Test Filter: %s\n' % ([p.pattern for p in self.test_filter],))

            if self.tests:
                sys.stdout.write('Tests to run: %s\n' % (self.tests,))

            all_tests = self.filter_tests(all_tests)
            
        import pydev_runfiles_unittest
        test_suite = pydev_runfiles_unittest.PydevTestSuite(all_tests)
        import pydev_runfiles_xml_rpc
        pydev_runfiles_xml_rpc.notifyTestsCollected(test_suite.countTestCases())
        
        executed_in_parallel = False
        start_time = time.time()
        if self.jobs > 1:
            import pydev_runfiles_parallel
            
            #What may happen is that the number of jobs needed is lower than the number of jobs requested
            #(e.g.: 2 jobs were requested for running 1 test) -- in which case ExecuteTestsInParallel will
            #return False and won't run any tests.
            executed_in_parallel = pydev_runfiles_parallel.ExecuteTestsInParallel(
                all_tests, self.jobs, self.split_jobs, self.verbosity, coverage_files, self.configuration.coverage_include)
            
        if not executed_in_parallel:
            #If in coverage, we don't need to pass anything here (coverage is already enabled for this execution).
            runner = pydev_runfiles_unittest.PydevTextTestRunner(stream=sys.stdout, descriptions=1, verbosity=self.verbosity)
            sys.stdout.write('\n')
            runner.run(test_suite)
            
        if handle_coverage:
            coverage.stop()
            coverage.save()
        
        total_time = 'Finished in: %.2f secs.' % (time.time() - start_time,)
        pydev_runfiles_xml_rpc.notifyTestRunFinished(total_time)
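The filter_tests body is not part of this excerpt, but the p.pattern access above implies that test_filter holds compiled regular expressions matched against test names. A hypothetical sketch of that matching rule (matches_filter is not a PyDev name):

    import re

    def matches_filter(test_name, test_filter):
        #Keep a test when any of the compiled patterns matches its name.
        return any(p.match(test_name) for p in test_filter)

    #For instance: matches_filter('test_login', [re.compile('test_log.*')]) -> True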
Code Example #3
    def pytest_runtestloop(self, session):
        if self._using_xdist:
            #We don't have the hooks we'd need to show the results in the pyunit view...
            #Maybe the plugin maintainer can provide these additional hooks?
            return None

        #This mock makes all file representations be printed as Pydev expects,
        #so that hyperlinks are properly created in errors. Note that we don't unmock it!
        self._MockFileRepresentation()

        #Based on the default run test loop (_pytest.session.pytest_runtestloop),
        #but getting the times we need, reporting the number of tests found and
        #notifying as each test is run.

        start_total = time.time()
        try:
            pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))

            if session.config.option.collectonly:
                return True

            for item in session.session.items:

                filename = item.fspath.strpath
                test = item.location[2]
                start = time.time()

                pydev_runfiles_xml_rpc.notifyStartTest(filename, test)

                #Don't use this hook because we need the actual reports.
                #item.config.hook.pytest_runtest_protocol(item=item)
                reports = runner.runtestprotocol(item)
                delta = time.time() - start

                captured_output = ''
                error_contents = ''

                status = 'ok'
                for r in reports:
                    if r.outcome not in ('passed', 'skipped'):
                        #Outcomes are only passed, skipped and failed (no error), so
                        #treat a failure outside the call phase as an error.
                        if r.when == 'setup':
                            if status == 'ok':
                                status = 'error'

                        elif r.when == 'teardown':
                            if status == 'ok':
                                status = 'error'

                        else:
                            #Any failure in the call itself (not in setup or teardown)
                            #is considered a regular failure.
                            status = 'fail'

                    if hasattr(r, 'longrepr') and r.longrepr:
                        rep = r.longrepr
                        if hasattr(rep, 'reprcrash'):
                            reprcrash = rep.reprcrash
                            error_contents += str(reprcrash)
                            error_contents += '\n'

                        if hasattr(rep, 'reprtraceback'):
                            error_contents += str(rep.reprtraceback)

                        if hasattr(rep, 'sections'):
                            for name, content, sep in rep.sections:
                                error_contents += sep * 40
                                error_contents += name
                                error_contents += sep * 40
                                error_contents += '\n'
                                error_contents += content
                                error_contents += '\n'

                self.reportCond(status, filename, test, captured_output, error_contents, delta)

                if session.shouldstop:
                    raise session.Interrupted(session.shouldstop)
        finally:
            pydev_runfiles_xml_rpc.notifyTestRunFinished(
                'Finished in: %.2f secs.' % (time.time() - start_total,))
        return True
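The report loop above folds pytest's setup/call/teardown reports into a single PyDev status: a failure in setup or teardown counts as an error unless the call phase already failed. The same rule as a standalone helper (a hypothetical refactor for illustration, not PyDev code):

    def status_from_reports(reports):
        status = 'ok'
        for r in reports:
            if r.outcome not in ('passed', 'skipped'):
                if r.when in ('setup', 'teardown'):
                    #A failure outside the test call is an error, unless the
                    #call phase already produced a regular failure.
                    if status == 'ok':
                        status = 'error'
                else:
                    #A failure in the call phase is a regular test failure.
                    status = 'fail'
        return status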
Code Example #4
File: pydev_runfiles_pytest2.py Project: jheiv/Pydev
def pytest_collection_modifyitems():
    connect_to_server_for_communication_to_xml_rpc_on_xdist()
    pydev_runfiles_xml_rpc.notifyTestsCollected(State.numcollected)
    State.numcollected = 0
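The State holder and the code that increments numcollected are outside this excerpt; under pytest-xdist the workers report the collected count through this hook and then reset it. A plausible sketch of the counting side, assuming it lives in a pytest_collectreport hook (this State class is a hypothetical reconstruction mirroring the names above):

    import pytest

    class State(object):
        numcollected = 0

    def pytest_collectreport(report):
        #Count only real test items, not intermediate collectors.
        State.numcollected += sum(
            1 for x in report.result if isinstance(x, pytest.Item))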
Code Example #5
    def pytest_runtestloop(self, session):
        if self._using_xdist:
            #We don't have the hooks we'd need to show the results in the pyunit view...
            #Maybe the plugin maintainer can provide these additional hooks?
            return None

        #This mock makes all file representations be printed as Pydev expects,
        #so that hyperlinks are properly created in errors. Note that we don't unmock it!
        self._MockFileRepresentation()

        #Based on the default run test loop (_pytest.session.pytest_runtestloop),
        #but getting the times we need, reporting the number of tests found and
        #notifying as each test is run.

        start_total = time.time()
        try:
            pydev_runfiles_xml_rpc.notifyTestsCollected(len(session.session.items))

            if session.config.option.collectonly:
                return True

            for item in session.session.items:

                filename = item.fspath.strpath
                test = item.location[2]
                start = time.time()

                pydev_runfiles_xml_rpc.notifyStartTest(filename, test)

                #Don't use this hook because we need the actual reports.
                #item.config.hook.pytest_runtest_protocol(item=item)
                reports = runner.runtestprotocol(item)
                delta = time.time() - start

                captured_output = ''
                error_contents = ''

                status = 'ok'
                for r in reports:
                    if r.outcome not in ('passed', 'skipped'):
                        #Outcomes are only passed, skipped and failed (no error), so
                        #treat a failure outside the call phase as an error.
                        if r.when == 'setup':
                            if status == 'ok':
                                status = 'error'

                        elif r.when == 'teardown':
                            if status == 'ok':
                                status = 'error'

                        else:
                            #Any failure in the call itself (not in setup or teardown)
                            #is considered a regular failure.
                            status = 'fail'

                    if r.longrepr:
                        rep = r.longrepr
                        reprcrash = rep.reprcrash
                        error_contents += str(reprcrash)
                        error_contents += '\n'
                        error_contents += str(rep.reprtraceback)
                        for name, content, sep in rep.sections:
                            error_contents += sep * 40
                            error_contents += name
                            error_contents += sep * 40
                            error_contents += '\n'
                            error_contents += content
                            error_contents += '\n'

                self.reportCond(status, filename, test, captured_output, error_contents, delta)

                if session.shouldstop:
                    raise session.Interrupted(session.shouldstop)
        finally:
            pydev_runfiles_xml_rpc.notifyTestRunFinished(
                'Finished in: %.2f secs.' % (time.time() - start_total,))
        return True
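Unlike Example #3, this variant reads reprcrash, reprtraceback and sections without hasattr guards, which assumes every non-empty longrepr is a structured exception repr. A defensive sketch of the same extraction (extract_error_text is a hypothetical helper, not PyDev code):

    def extract_error_text(longrepr):
        #Fall back to str() when longrepr is a plain string or tuple
        #rather than a structured repr with reprcrash/reprtraceback.
        if not hasattr(longrepr, 'reprcrash'):
            return str(longrepr)
        return '%s\n%s' % (longrepr.reprcrash, longrepr.reprtraceback)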