Example #1
    def handleUpdate(self, index, tr):
        if self.progressBar:
            if tr.failed():
                self.progressBar.clear()
            else:
                # Force monotonicity
                self.progress = max(self.progress, float(index)/self.numTests)
                self.progressBar.update(self.progress, tr.path)
                return
        elif self.opts.succinct:
            if not tr.failed():
                sys.stdout.write('.')
                sys.stdout.flush()
                return
            else:
                sys.stdout.write('\n')

        extra = ''
        if tr.code==TestStatus.Invalid:
            extra = ' - (Invalid test)'
        elif tr.code==TestStatus.NoRunLine:
            extra = ' - (No RUN line)'
        elif tr.failed():
            extra = ' - %s'%(TestStatus.getName(tr.code).upper(),)
        print '%*d/%*d - %s%s'%(self.digits, index+1, self.digits, 
                              self.numTests, tr.path, extra)

        if tr.failed() and self.opts.showOutput:
            TestRunner.cat(tr.testResults, sys.stdout)
Example #2
def main():
    ''' main test logic '''
    print("TestRunner.py - main")

    if Configuration.DEBUG:
        print("Debug messaging is ON")
        logLevel = logging.DEBUG
    else:
        print("Debug messaging is OFF")
        logLevel = logging.INFO

    # setup logger
    if logFileFromBAT is not None:
        Configuration.Logger = UnitTestUtilities.initializeLogger(logFileFromBAT, logLevel)
    else:
        Configuration.GetLogger(logLevel)

    print("Logging results to: " + str(Configuration.LoggerFile))
    UnitTestUtilities.setUpLogFileHeader()

    result = runTestSuite()

    TestRunner.logTestResults(result)

    return result.wasSuccessful()
Example #3
    def handleUpdate(self, index, tr):
        if self.progressBar:
            if tr.failed():
                self.progressBar.clear()
            else:
                # Force monotonicity
                self.progress = max(self.progress,
                                    float(index) / self.numTests)
                self.progressBar.update(self.progress, tr.path)
                return
        elif self.opts.succinct:
            if not tr.failed():
                sys.stdout.write('.')
                sys.stdout.flush()
                return
            else:
                sys.stdout.write('\n')

        extra = ''
        if tr.code == TestStatus.Invalid:
            extra = ' - (Invalid test)'
        elif tr.code == TestStatus.NoRunLine:
            extra = ' - (No RUN line)'
        elif tr.failed():
            extra = ' - %s' % (TestStatus.getName(tr.code).upper(), )
        print '%*d/%*d - %s%s' % (self.digits, index + 1, self.digits,
                                  self.numTests, tr.path, extra)

        if tr.failed() and self.opts.showOutput:
            TestRunner.cat(tr.testResults, sys.stdout)
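
The `# Force monotonicity` line above guards against out-of-order completions: when results are produced by worker threads (see the `Tester` class below), `float(index)/self.numTests` for a late-arriving result can be smaller than a fraction already displayed, and `max()` keeps the bar from sliding backward. A minimal standalone sketch of the same idea (the class name is illustrative, not part of the snippets):

class ProgressTracker(object):
    # Tracks a fraction in [0, 1] that never decreases, even when
    # completion indices arrive out of order.
    def __init__(self, num_tests):
        self.num_tests = num_tests
        self.progress = 0.0

    def update(self, index):
        self.progress = max(self.progress, float(index) / self.num_tests)
        return self.progress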
Example #4
class Tester(threading.Thread):
    def __init__(self, provider):
        threading.Thread.__init__(self)
        self.provider = provider

    def run(self):
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, (path, index)):
        base = TestRunner.getTestOutputBase('Output', path)
        numTests = len(self.provider.tests)
        digits = len(str(numTests))
        code = None
        elapsed = None
        try:
            opts = self.provider.opts
            startTime = time.time()
            code, output = TestRunner.runOneTest(self.provider.config, path,
                                                 base)
            elapsed = time.time() - startTime
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print '\nCtrl-C detected, goodbye.'
            os.kill(0, 9)

        self.provider.setResult(index, TestResult(path, code, output, elapsed))
Example #5
def main():
    ''' main test logic '''
    print("TestRunner.py - main")

    if Configuration.DEBUG:
        print("Debug messaging is ON")
        logLevel = logging.DEBUG
    else:
        print("Debug messaging is OFF")
        logLevel = logging.INFO

    # setup logger
    if logFileFromBAT is not None:
        Configuration.Logger = UnitTestUtilities.initializeLogger(
            logFileFromBAT, logLevel)
    else:
        Configuration.GetLogger(logLevel)

    print("Logging results to: " + str(Configuration.LoggerFile))
    UnitTestUtilities.setUpLogFileHeader()

    result = runTestSuite()

    TestRunner.logTestResults(result)

    return result.wasSuccessful()
Example #6
    def execute(self, test, litConfig):
        if test.config.unsupported:
            return (Test.UNSUPPORTED, 'Test is unsupported')

        cmd = list(self.command)

        # If using temp input, create a temporary file and hand it to the
        # subclass.
        if self.useTempInput:
            tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
            self.createTempInput(tmp, test)
            tmp.flush()
            cmd.append(tmp.name)
        else:
            cmd.append(test.source_path)

        out, err, exitCode = TestRunner.executeCommand(cmd)

        diags = out + err
        if not exitCode and not diags.strip():
            return Test.PASS, ''

        # Try to include some useful information.
        report = """Command: %s\n""" % ' '.join(["'%s'" % a for a in cmd])
        if self.useTempInput:
            report += """Temporary File: %s\n""" % tmp.name
            report += "--\n%s--\n" "" % open(tmp.name).read()
        report += """Output:\n--\n%s--""" % diags

        return Test.FAIL, report
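
Two details of the temp-input path above are easy to miss: `tempfile.NamedTemporaryFile` deletes the file as soon as the object is closed or collected, so `tmp` must stay alive until the command finishes, and `tmp.flush()` is what makes the written bytes visible to the child process. A hedged standalone sketch (standard library only; the `-fsyntax-only` flag and reopening the file while it is still open, which works on POSIX but not on Windows, are assumptions):

import subprocess
import tempfile

def check_compiles(compiler, source_text):
    # Keep a reference to tmp for the whole call: the file is deleted
    # the moment tmp is closed or garbage collected.
    tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.cpp')
    try:
        tmp.write(source_text)
        tmp.flush()  # make the written bytes visible to the child process
        return subprocess.call([compiler, '-fsyntax-only', tmp.name]) == 0
    finally:
        tmp.close()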
Example #7
    def execute(self, test, litConfig):
        if test.config.unsupported:
            return (Test.UNSUPPORTED, 'Test is unsupported')

        cmd = list(self.command)

        # If using temp input, create a temporary file and hand it to the
        # subclass.
        if self.useTempInput:
            tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
            self.createTempInput(tmp, test)
            tmp.flush()
            cmd.append(tmp.name)
        elif hasattr(test, 'source_path'):
            cmd.append(test.source_path)
        else:
            cmd.append(test.getSourcePath())

        out, err, exitCode = TestRunner.executeCommand(cmd)

        diags = out + err
        if not exitCode and not diags.strip():
            return Test.PASS,''

        # Try to include some useful information.
        report = """Command: %s\n""" % ' '.join(["'%s'" % a
                                                 for a in cmd])
        if self.useTempInput:
            report += """Temporary File: %s\n""" % tmp.name
            report += "--\n%s--\n""" % open(tmp.name).read()
        report += """Output:\n--\n%s--""" % diags

        return Test.FAIL, report
Example #8
    def handleUpdate(self, index, tr):
        if self.progressBar:
            if tr.failed():
                self.progressBar.clear()
            else:
                # Force monotonicity
                self.progress = max(self.progress, float(index)/self.numTests)
                self.progressBar.update(self.progress, tr.path)
                return
        elif self.opts.succinct:
            if not tr.failed():
                sys.stdout.write('.')
                sys.stdout.flush()
                return
            else:
                sys.stdout.write('\n')

        extra = ''
        if tr.code==TestStatus.Invalid:
            extra = ' - (Invalid test)'
        elif tr.code==TestStatus.NoRunLine:
            extra = ' - (No RUN line)'
        elif tr.failed():
            extra = ' - %s'%(TestStatus.getName(tr.code).upper(),)
        print '%*d/%*d - %s%s'%(self.digits, index+1, self.digits, 
                              self.numTests, tr.path, extra)

        if tr.failed():
            msgs = []
            if tr.warnings:
                msgs.append('%d warnings'%(len(tr.warnings),))
            if tr.errors:
                msgs.append('%d errors'%(len(tr.errors),))
            if tr.assertions:
                msgs.append('%d assertions'%(len(tr.assertions),))
            
            if msgs:
                print '\tFAIL (%s)'%(', '.join(msgs))
            for i,error in enumerate(set([e for (_,_,_,e) in tr.errors])):
                print '\t\tERROR: %s'%(error,)
                if i>20:
                    print '\t\t\t(too many errors, skipping)'
                    break
            for assertion in set(tr.assertions):
                print '\t\tASSERTION: %s'%(assertion,)
            if self.opts.showOutput:
                TestRunner.cat(tr.testResults, sys.stdout)
Example #9
def isLanguageSupported(language, compiler):
  # How to run the compiler.  Additional arguments are added below.
  args = [compiler, '-S', '-o', os.devnull]

  if language == 'java':
    # GCC can't compile Java source by itself, it can only compile class files.
    script_dir = os.path.dirname(os.path.realpath(__file__))
    source = os.path.join(script_dir, 'e.class')
    # Java is supported if the class file compiles without error.
    out,err,exitCode = TestRunner.executeCommand(args +
                                                 [source, '-fuse-boehm-gc'])
    return exitCode == 0

  if language == 'ada':
    suffix='.ads'
  elif language == 'c':
    suffix='.c'
  elif language == 'c++':
    suffix='.cpp'
  elif language == 'fortran':
    suffix='.f'
  elif language == 'go':
    suffix='.go'
  elif language == 'objective-c':
    suffix='.m'
  elif language == 'objective-c++':
    suffix='.mm'
  else:
    return False

  # For most languages it suffices to try compiling an empty file; however,
  # for Ada and Go an empty file is not a valid compilation unit.
  source = tempfile.NamedTemporaryFile(mode='w+t', suffix=suffix)

  if language == 'ada':
    # Use an obscure package name, as if the package is called XYZ and a file
    # called XYZ.adb exists then the test will fail.
    source.write('package U5TE4J886W is end;\n')
  elif language == 'go':
    source.write('package main\n')

  # If something was written then ensure it is visible to the compiler process.
  source.flush()

  # The language is supported if the file compiles without error.
  out,err,exitCode = TestRunner.executeCommand(args + [source.name])
  return exitCode == 0
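
A possible call site for `isLanguageSupported` above; the compiler path and the language list are illustrative, not taken from the snippet:

# Probe which front ends the installed compiler actually accepts.
supported = [lang for lang in ('ada', 'c', 'c++', 'fortran', 'go')
             if isLanguageSupported(lang, '/usr/bin/gcc')]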
Example #10
def generateFortranModules(cmd, srcPath, OutputDir):
    # Fortran 90 code often fails to compile because it needs modules defined by
    # other files in the same directory.  If this seems to be happening then try
    # to generate all of the required modules by compiling every Fortran file in
    # the same directory.
    srcDir,srcBase = os.path.split(srcPath)
    cmd = cmd + ['-I', srcDir, '-fsyntax-only']

    # If the file compiles OK or isn't failing because of lacking modules then
    # there is no point in trying to generate modules.
    out,err,exitCode = TestRunner.executeCommand(cmd + [srcPath], OutputDir)
    if exitCode == 0 or err is None or "Can't open module file" not in err:
        return

    # Drat, it fails to compile.  Generate modules for every Fortran file in the
    # source directory.
    fortranSuffixes = DEUtils.getSuffixesForLanguage('fortran')
    filesToCompile = []
    for filename in os.listdir(srcDir):
        filepath = os.path.join(srcDir, filename)
        if not os.path.isdir(filepath):
            base,ext = os.path.splitext(filename)
            if ext in fortranSuffixes:
                filesToCompile.append(filepath)

    # Compile every file, returning triumphantly once the original file manages
    # to compile, or giving up miserably if no progress is being made.
    newFilesToCompile = []
    while filesToCompile != newFilesToCompile:
        newFilesToCompile = []
        # Compile each file in turn.
        for path in filesToCompile:
            out,err,exitCode = TestRunner.executeCommand(cmd + [path], OutputDir)
            if exitCode != 0 and err is not None and "Can't open module file" in err:
                # It failed to compile due to a missing module.  Remember it for
                # the next round.
                newFilesToCompile.append(path)
            elif path == srcPath:
                # The original file compiled, or at least didn't fail to compile
                # due to a lacking module.  Return triumphantly!
                return
        # Arrange for the next iteration to compile the files that were missing
        # modules this time round.
        filesToCompile, newFilesToCompile = newFilesToCompile, filesToCompile

    # The set of files missing modules didn't change, give up miserably.
    return
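
The loop above is a fixed-point iteration: each round recompiles only the files that failed for want of a module, returning early if the original file gets through, and giving up once a full round makes no progress. The same idiom distilled, under the assumption that `retry` is a callable returning True on success (both names are hypothetical):

def retry_until_stable(items, retry):
    pending = list(items)
    previous = None
    while pending != previous:
        previous = pending
        # Keep only the items that still fail this round.
        pending = [item for item in previous if not retry(item)]
    return pending  # the survivors can make no further progress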
Example #11
class Tester(threading.Thread):
    def __init__(self, provider):
        threading.Thread.__init__(self)
        self.provider = provider

    def run(self):
        while 1:
            item = self.provider.get()
            if item is None:
                break
            self.runTest(item)

    def runTest(self, (path, index)):
        command = path
        # Use hand concatenation here because we want to override
        # absolute paths.
        output = 'Output/' + path + '.out'
        testname = path
        testresults = 'Output/' + path + '.testresults'
        TestRunner.mkdir_p(os.path.dirname(testresults))
        numTests = len(self.provider.tests)
        digits = len(str(numTests))
        code = None
        try:
            opts = self.provider.opts
            if opts.debugDoNotTest:
                code = None
            else:
                code = TestRunner.runOneTest(path,
                                             command,
                                             output,
                                             testname,
                                             opts.clang,
                                             useValgrind=opts.useValgrind,
                                             useDGCompat=opts.useDGCompat,
                                             useScript=opts.testScript,
                                             output=open(testresults, 'w'))
        except KeyboardInterrupt:
            # This is a sad hack. Unfortunately subprocess goes
            # bonkers with ctrl-c and we start forking merrily.
            print 'Ctrl-C detected, goodbye.'
            os.kill(0, 9)

        self.provider.setResult(index, TestResult(path, code, testresults))
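
The `Tester` threads in Examples #4 and #11 exit when `provider.get()` returns None. A minimal provider compatible with that protocol might look like the sketch below; the class name is hypothetical, and the real provider also carries `opts` and `config`, omitted here:

import threading

class TestProvider(object):
    def __init__(self, tests):
        self.tests = tests
        self.results = [None] * len(tests)
        self.index = 0
        self.lock = threading.Lock()

    def get(self):
        # Hand out (path, index) work items; None is the shutdown
        # sentinel each Tester thread checks for.
        with self.lock:
            if self.index >= len(self.tests):
                return None
            item = (self.tests[self.index], self.index)
            self.index += 1
            return item

    def setResult(self, index, result):
        with self.lock:
            self.results[index] = result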
Example #12
    def execute(self, test, litConfig):
        execdir = os.path.dirname(test.getExecPath())
        tmpBase  = self.getTmpBase(test, litConfig)

        res = self.getTestScript(test, litConfig, self.default_script)
        if len(res) == 2:
            return res
        s, script, isXFail = res

        script = self.applyScriptSubstitutions(test, litConfig, script,
                tmpBase, normalize_slashes=self.execute_external)

        return TestRunner.executeShTest(test, litConfig, script,
                                        isXFail, tmpBase, execdir,
                                        self.execute_external)
Example #13
    def execute(self, test, litConfig):
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)

        cmd = [testPath, "--gtest_filter=" + testName]
        out, err, exitCode = TestRunner.executeCommand(cmd)

        if not exitCode:
            return Test.PASS, ""

        return Test.FAIL, out + err
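
The `while not os.path.exists(testPath)` walk above exists because a GoogleTest id for a parametrized or typed test contains '/'s: the loop keeps splitting until the remaining prefix is a real file on disk, and everything below it becomes the --gtest_filter value. The same walk as a standalone helper (illustrative; e.g. build/FooTest/Params/Case/0 resolves to executable build/FooTest with filter Params/Case/0):

import os

def split_gtest_id(source_path):
    exe, name = os.path.split(source_path)
    while not os.path.exists(exe):
        # Fold one more component into the filter until exe is a file.
        exe, prefix = os.path.split(exe)
        name = os.path.join(prefix, name)
    return exe, name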
Example #14
    def execute(self, test, litConfig):
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)

        cmd = [testPath, '--gtest_filter=' + testName]
        out, err, exitCode = TestRunner.executeCommand(cmd)

        if not exitCode:
            return Test.PASS, ''

        return Test.FAIL, out + err
Example #15
    def execute(self, test, litConfig):
        litConfig.ignoreStdErr = self.ignoreStdErr

        execdir = os.path.dirname(test.getExecPath())
        tmpBase  = self.getTmpBase(test, litConfig)

        res = self.getTestScript(test, litConfig)
        if len(res) == 2:
            return res
        s, script, isXFail = res

        script = self.applyScriptSubstitutions(test, litConfig, script,
                tmpBase, normalize_slashes=TestRunner.kIsWindows)

        return TestRunner.executeTclTest(test, litConfig, script,
                                         isXFail, tmpBase, execdir)
Example #16
    def execute(self, test, litConfig):
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)

        cmd = [testPath, '--gtest_filter=' + testName]
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        out, err, exitCode = TestRunner.executeCommand(
            cmd, env=test.config.environment)

        if not exitCode:
            return Test.PASS,''

        return Test.FAIL, out + err
Example #17
    def execute(self, test, litConfig):
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)

        cmd = [testPath, '--gtest_filter=' + testName]
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        if litConfig.noExecute:
            return Test.PASS, ''

        out, err, exitCode = TestRunner.executeCommand(
            cmd, env=test.config.environment)

        if not exitCode:
            return Test.PASS,''

        return Test.FAIL, out + err
Example #18
    def execute(self, test, litConfig):
        if test.config.unsupported:
            return (Test.UNSUPPORTED, 'Test is unsupported')

        cmd = list(self.command)

        # If using temp input, create a temporary file and hand it to the
        # subclass.
        if self.useTempInput:
            tmp = tempfile.NamedTemporaryFile(suffix='.cpp')
            self.createTempInput(tmp, test)
            tmp.flush()
            cmd.append(tmp.name)
        elif hasattr(test, 'source_path'):
            cmd.append(test.source_path)
        else:
            cmd.append(test.getSourcePath())

        out, err, exitCode = TestRunner.executeCommand(cmd)

        if not exitCode and (self.allowStdout or not out.strip()) and \
                            (self.allowStderr or not err.strip()):
            status = Test.PASS
        else:
            status = Test.FAIL

        if status == Test.FAIL or litConfig.showAllOutput:
            # Try to include some useful information.
            report = """Command: %s\n""" % ' '.join(["'%s'" % a
                                                     for a in cmd])
            if self.useTempInput:
                report += """Temporary File: %s\n""" % tmp.name
                report += "--\n%s--\n""" % open(tmp.name).read()
            report += """Command Output (stdout):\n--\n%s--\n""" % out
            report += """Command Output (stderr):\n--\n%s--\n""" % err
        else:
            report = ""

        return status, report
Example #19
def dpdkick_main():

    dpdkick_init()

    if globalvar.ENV_CONF_TYPE == "performance":
        hw_conf_test_suite = unittest.TestLoader().loadTestsFromTestCase(hwconftest.hwconftest)
        sw_conf_test_suite = unittest.TestLoader().loadTestsFromTestCase(swconftest.swconftest)
        kernel_conf_test_suite = unittest.TestLoader().loadTestsFromTestCase(kernelconftest.kernelconftest)
    else:
        print "Invalid DPDKick configuration type"
        print "Exiting..."
        raise SystemExit

    runner = TestRunner.TestRunner()

    util.format_print_test_suite_title('Hardware Configuration Verification')
    runner.run(hw_conf_test_suite, description = 'Hardware')

    util.format_print_test_suite_title('Software Configuration and Runtime Verification')
    runner.run(sw_conf_test_suite, description = 'Software')

    util.format_print_test_suite_title('Kernel Configuration Verification')
    runner.run(kernel_conf_test_suite, description = 'Kernel')
Example #20
    def execute(self, test, litConfig):
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)

        cmd = [testPath, '--gtest_filter=' + testName]
        if litConfig.useValgrind:
            valgrindArgs = ['valgrind', '-q',
                            '--tool=memcheck', '--trace-children=yes',
                            '--error-exitcode=123']
            valgrindArgs.extend(litConfig.valgrindArgs)

            cmd = valgrindArgs + cmd

        out, err, exitCode = TestRunner.executeCommand(
            cmd, env=test.config.environment)
            
        if not exitCode:
            return Test.PASS,''

        return Test.FAIL, out + err
Example #21
    def execute(self, test, litConfig):
        return TestRunner.executeShTest(test, litConfig,
                                        self.execute_external)
Example #22
            parser.error(e.args[0])

    cfg = TestingConfig.frompath(opts.config)

    # Update the configuration based on the command line arguments.
    for name in ('PATH','SYSTEMROOT'):
        if name in cfg.environment:
            parser.error("'%s' should not be set in configuration!" % name)

    cfg.root = opts.root
    cfg.environment['PATH'] = os.pathsep.join(opts.path + 
                                                 [os.environ.get('PATH','')])
    cfg.environment['SYSTEMROOT'] = os.environ.get('SYSTEMROOT','')

    if opts.jlang is None:
        opts.jlang = TestRunner.inferJlang(cfg)
    if opts.jlangcc is None:
        opts.jlangcc = TestRunner.inferJlangCC(cfg, opts.jlang)

    cfg.jlang = opts.jlang
    cfg.jlangcc = opts.jlangcc
    cfg.useValgrind = opts.useValgrind
    cfg.useExternalShell = opts.useExternalShell

    # FIXME: It could be worth loading these in parallel with testing.
    allTests = list(getTests(cfg, args))
    allTests.sort()
    
    tests = allTests
    if opts.shuffle:
        random.shuffle(tests)
Example #23
    def execute(self, test, litConfig):
        return TestRunner.executeTclTest(test, litConfig)
Example #24
    def execute(self, test, litConfig):
        return TestRunner.executeShTest(test, litConfig, self.execute_external)
Example #25
    def execute(self, test, litConfig):
        litConfig.ignoreStdErr = self.ignoreStdErr
        return TestRunner.executeTclTest(test, litConfig)
Example #26
tests = [
    ("testExecs/iotest.exe", "", {}),
]

longTests = []

if __name__ == '__main__':
    import sys
    import TestRunner
    failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
    sys.exit(len(failed))
Example #27
    _descriptionPath = os.path.join(_curentdatapath, "description.csv")
    _dataPath = os.path.join(_curentdatapath, "data.csv")

    # Read the description; it contains the field names and field values,
    # including the class column.
    description = pd.read_csv(_descriptionPath, dtype=str)
    # Create a dictionary mapping each field to its list of values.
    attDict = dict(
        zip(list(description),
            [_values.split(",")
             for _values in (description.values.tolist()[0])]))
    # Load the data and assign columns from the description (nodelist).
    data = pd.read_csv(_dataPath, names=list(attDict.keys()), dtype=str)

    data = data.sample(frac=1).reset_index(drop=True)

    PrunedMetrics = TestRunner.RunPrunedTreeTest(data, attDict, 0.2, 0.2)

    trainTestMetrics = TestRunner.RunTrainTest(data, attDict, 0.66)
    Fold3Metrics = TestRunner.RunKfold(data, attDict, 3)
    Fold5Metrics = TestRunner.RunKfold(data, attDict, 5)

    with open(os.path.join(_curentdatapath, "Results.txt"), "w") as text_file:
        print("trainTestMetrics: {0},{1},{2}".format(*trainTestMetrics),
              file=text_file)
        print("Fold3Metrics: {0},{1},{2}".format(*Fold3Metrics),
              file=text_file)
        print("Fold5Metrics: {0},{1},{2}".format(*Fold5Metrics),
              file=text_file)

        print(
            "BeforePruneMetrics: {0},{1},{2}, ".format(*PrunedMetrics[0]) +
Example #28
            parser.error(e.args[0])

    cfg = TestingConfig.frompath(opts.config)

    # Update the configuration based on the command line arguments.
    for name in ('PATH', 'SYSTEMROOT'):
        if name in cfg.environment:
            parser.error("'%s' should not be set in configuration!" % name)

    cfg.root = opts.root
    cfg.environment['PATH'] = os.pathsep.join(opts.path +
                                              [os.environ.get('PATH', '')])
    cfg.environment['SYSTEMROOT'] = os.environ.get('SYSTEMROOT', '')

    if opts.clang is None:
        opts.clang = TestRunner.inferClang(cfg)
    if opts.clangcc is None:
        opts.clangcc = TestRunner.inferClangCC(cfg, opts.clang)

    cfg.clang = opts.clang
    cfg.clangcc = opts.clangcc
    cfg.useValgrind = opts.useValgrind
    cfg.useExternalShell = opts.useExternalShell

    # FIXME: It could be worth loading these in parallel with testing.
    allTests = list(getTests(cfg, args))
    allTests.sort()

    tests = allTests
    if opts.shuffle:
        random.shuffle(tests)
Example #30
            parser.error(e.args[0])

    cfg = TestingConfig.frompath(opts.config)

    # Update the configuration based on the command line arguments.
    for name in ('PATH','SYSTEMROOT'):
        if name in cfg.environment:
            parser.error("'%s' should not be set in configuration!" % name)

    cfg.root = opts.root
    cfg.environment['PATH'] = os.pathsep.join(opts.path + 
                                                 [os.environ.get('PATH','')])
    cfg.environment['SYSTEMROOT'] = os.environ.get('SYSTEMROOT','')

    if opts.clang is None:
        opts.clang = TestRunner.inferClang(cfg)
    if opts.clangcc is None:
        opts.clangcc = TestRunner.inferClangCC(cfg, opts.clang)

    cfg.clang = opts.clang
    cfg.clangcc = opts.clangcc
    cfg.useValgrind = opts.useValgrind
    cfg.useExternalShell = opts.useExternalShell

    # FIXME: It could be worth loading these in parallel with testing.
    allTests = list(getTests(cfg, args))
    allTests.sort()
    
    tests = allTests
    if opts.shuffle:
        random.shuffle(tests)
Example #31
def main(argv):
    global config
    global pbar
    global test_index

    lcfgPath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),
                            'logging.cfg')
    logging.config.fileConfig(lcfgPath,
                              defaults=None,
                              disable_existing_loggers=True)
    logger = logging.getLogger(__name__)
    logger.debug('Test Report Generator Validation')

    # When this module is executed from the command line, run all its tests
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hc:v:r:m::x:s:', [
            'help', 'config=', 'verbose=', 'report=', 'mode=', 'repeat=',
            'suite='
        ])
    except getopt.GetoptError as err:
        # Print help information and exit; err reads like
        # "option -a not recognized".
        logger.error(str(err))
        logger.error('option not recognized')
        usage()
        sys.exit(2)

    mode = None
    repeat = 1
    report_fn = None
    suite_fn = None
    config_fn = None
    config = None

    for o, a in opts:
        if o in ('-v', '--verbose'):
            valid_levels = [
                'NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
            ]

            logger_levels = [
                logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
                logging.ERROR, logging.CRITICAL
            ]
            verbose_level = a
            if verbose_level in valid_levels:
                i = valid_levels.index(verbose_level)
                logger.setLevel(logger_levels[i])
            else:
                usage()
                sys.exit()

        elif o in ('-h', '--help'):
            usage()
            sys.exit()
        elif o in ('-c', '--config'):
            config_fn = a
            if not os.path.exists(config_fn):
                logger.error('The specified config file %s does not exist' %
                             config_fn)
                sys.exit(3)
        elif o in ('-r', '--report'):
            report_fn = a
        elif o in ('-m', '--mode'):
            mode = a
        elif o in ('-x', '--repeat'):
            repeat = int(a)
        elif o in ('-s', '--suite'):
            suite_fn = a
            if not os.path.exists(suite_fn):
                logger.error(
                    'The specified test suite file %s does not exist' %
                    suite_fn)
                sys.exit(3)
        else:
            assert False, 'unhandled option'

    #
    # handle configuration if specified on command line
    #
    if config_fn:

        logger.info('Handle configuration file')
        config = configparser.ConfigParser()
        try:
            config.read(config_fn)
        except configparser.Error:
            logger.error('failed to read config file %s' % config_fn)

        logger.debug(config)
        logger.debug(config.sections())
        for c in config.sections():
            logger.debug(c)

    else:
        logger.info('Handle command line args only')

    if mode == 'HTML':
        logger.info('use the TestReportGen module')
        import TestRunner
        res = []
        with open('TestReportGenValidation_report.html', 'wb') as f:
            runner = TestRunner.TestRunner(
                stream=f,
                verbosity=19,
                title='Test Report Generator Validation',
                description='Test Report Generator Validation HTML report')
            for i in range(repeat):
                r = runner.run(TestReportGenTestSuite(suite_fn), i)
                res.append(r)

    else:
        # do report in text mode with the eventual report file name option
        logger.info('use Text mode')
        if report_fn:
            with open(report_fn, 'w', encoding='utf-8') as f:
                for i in range(repeat):
                    unittest.TextTestRunner(stream=f, verbosity=19).run(
                        TestReportGenTestSuite(suite_fn))
        else:
            with open('TestReportGen_result.txt', 'w', encoding='utf-8') as f:
                for i in range(repeat):
                    unittest.TextTestRunner(stream=f, verbosity=19).run(
                        TestReportGenTestSuite(suite_fn))

    return 0
Example #33
        json(data, test_path)
        self.assertTrue(isfile(test_path))

    def test_json_update(self):
        '''
        Tests that a JSON file gets updated.
        '''
        data = {}
        test_path = './testing_json.json'
        json(data, test_path)

        updated_data = {'update_test': True}
        json(updated_data, test_path, overwrite=False)

        store = StoreAdapter(method='JSON')
        d = store.load_json(test_path)
        self.assertEqual(d, updated_data)

    def tearDown(self):
        try:
            remove('./testing_json.json')
        except OSError:
            pass
        try:
            remove('./testing_csv.csv')
        except OSError:
            pass


TestRunner.main()
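
The update test above implies that `json(data, path, overwrite=False)` merges the new keys into whatever is already stored. A hedged sketch of a writer consistent with that behavior; this is a hypothetical helper, not the tested module's actual implementation:

import json as _json
import os

def write_json(data, path, overwrite=True):
    existing = {}
    if not overwrite and os.path.isfile(path):
        with open(path) as fh:
            existing = _json.load(fh)
    existing.update(data)  # overwrite=False means update, not replace
    with open(path, 'w') as fh:
        _json.dump(existing, fh)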
Example #34
    def executeOne(cmd):
        return TestRunner.executeCommand(cmd + args, cwd)