def __loadConfFile():
    """Load settings from 'defaults.conf' into module-level globals.

    Required options are fetched via __loadConfigOption(); optional ones
    ('verbose', 'logfile') are only read when present in the file.  If the
    conf file cannot be found, a warning is printed and the function
    returns without touching any globals.
    """
    global base_url,testrunid,xsp1_port,xsp2_port,xsp4_port
    global xsp1_url,xsp2_url,xsp4_url,graffiti_url,apache_url
    global rc_server,rc_port,rc_browser
    global graffiti_port, apache_port, verbose,logfile
    global usexsp2,usexsp4,config
    conf_file = 'defaults.conf'
    # The conf file is expected to live in the common directory.
    conf_file_path = os.path.join(__getCommonDir(),conf_file)
    if not os.path.exists(conf_file_path):
        helpers.printColor("Warning: Cannot find conf file '%s'" % conf_file_path,'red')
        return
    config = ConfigParser.ConfigParser()
    config.read(conf_file_path)
    # Required settings
    base_url = __loadConfigOption('base_url')
    testrunid = __loadConfigOption('testrunid')
    # testrunid must be numeric; converted to int, or None when absent/invalid.
    testrunid = __stringToIntOrNone(testrunid)
    xsp1_port = __loadConfigOption('xsp1_port')
    xsp2_port = __loadConfigOption('xsp2_port')
    xsp4_port = __loadConfigOption('xsp4_port')
    graffiti_port = __loadConfigOption('graffiti_port')
    apache_port = __loadConfigOption('apache_port')
    rc_server = __loadConfigOption('rc_server')
    rc_port = __loadConfigOption('rc_port')
    rc_browser = __loadConfigOption('rc_browser')
    #Optional settings
    if config.has_option('main','verbose'):
        verbose = config.getboolean('main','verbose')
    logfile = __loadConfigOption('logfile')
    # Treat the literal string 'None' or an empty value as "no log file".
    if logfile == 'None' or logfile == '':
        logfile = None
def runAllTests(self): '''Runs the tests are returns a dict of lists of passed and failed tests ''' aborted = False errors = {} # {testcaseid,[list of errors]} loader = unittest.TestLoader() testsuite = loader.loadTestsFromModule(__import__('__main__')) d = {'passed':[],'failed':[],'errors':[]} testsuite = self.__flattenTestSuite(testsuite) if self.runFailedOnly: testsuite = self.filterForFailedTestCases(testsuite) self.setTestCasesToRunning(testsuite) print "\nRunning %d tests\n" % testsuite.countTestCases() totalCount = testsuite.countTestCases() results = unittest.TestResult() skipped = 0 try: for i,t in enumerate(testsuite): print "Running %d of %d: %s ..." % ( i+1, totalCount, t.id()), sys.stdout.flush() failures = len(results.failures) errors = len(results.errors) #status = '' if helpers.myTestopia.isTestCaseInTestRun(t.testcaseid): t.run(results) if failures != len(results.failures): #Check if a failure was added helpers.printColor("FAILED",'red') d['failed'].append(t.testcaseid) #status = 'failed' d[t.testcaseid] = t.verificationErrors print results.failures[-1][1] # print the stack trace elif errors != len(results.errors): helpers.printColor("ERROR",'red') d['errors'].append(t.testcaseid) #status = 'failed' d[t.testcaseid] = t.verificationErrors print results.errors[-1][1] # print the stack trace else: print 'ok' d['passed'].append(t.testcaseid) #status = 'passed' #helpers.myTestopia.updateTestCaseViaThread(testcaseid=t.testcaseid,status=status,errorsList=t.verificationErrors) #print "done" # Get result from the results and print status else: helpers.printColor("skipped [%d]" % t.testcaseid, 'orange') skipped += 1 except KeyboardInterrupt: # The interrupted test run is counted as 'run' ie. 'passed' print "\n ** Aborting test run. 
Testopia will not be updated **" aborted = True resFailures = len(results.failures) resErrors = len(results.errors) resPassed = results.testsRun - (resFailures + resErrors) print "\n%12s:%3s" % ('Passed',resPassed) print "%12s:%3s" % ('Errors',resErrors) print "%12s:%3s" % ('Failures',resFailures) print "%12s:%3s" % ('Skipped',skipped) print "%12s:%3s\n" % ('Tests run',results.testsRun) return d