def RunTestCase(name, lang, type):
    """Compile the generated driver for a test case, run the resulting binary
    and compare its output with the expected result file.

    name -- full path of the test case source handed to the compiler.
    lang -- language key used for command-line wildcard lookups.
    type -- tool type key (shadows the builtin 'type'; kept for interface
            compatibility with callers).
    Returns true when compile, run and comparison all succeed.
    """
    baseName = util.ExtractName(name)

    # Compile driver.cc together with the test case source into ./driver.
    compiler = cmdline.LookUpWildCard('metaiv', lang, type, 'compiler')
    flags = cmdline.LookUpWildCard('metaiv', lang, type, 'cflags')
    cmd = compiler + " -o driver driver.cc " + name + " " + flags
    (exitCode, dummy1, dummy2) = util.RunCommand(cmd, 0, "Problem with compiling")
    ok = (exitCode == 0)

    if ok:
        if not os.path.exists("driver"):
            # Bug fix: previously the error was only reported and the missing
            # binary was executed anyway; bail out instead.
            report.Error("Driver binary is not created")
            return false
        (exitCode, stdout, stderr) = util.RunCommand(
            "./driver" + binext, 0, "Error running c++ generated c++ binary", true)
        ok = (exitCode == 0)
        if ok:
            ok = util.WriteFile(baseName + ".res", stdout)
        else:
            report.Error("Output", None, stdout, stderr)

    if ok:
        resFile = resfile.FindResFile(name)
        ok = (resFile != None)
        if ok:
            # Bug fix: the comparison result was previously discarded, so the
            # test passed even when the outputs differed.
            ok = resfile.CompareResult(name, baseName + ".res", resFile, None)
    return ok
def RunReplayTests(chromedriver, chrome=None, chrome_version=None, chrome_version_name=None): version_info = '' if chrome_version_name: version_info = '(%s)' % chrome_version_name util.MarkBuildStepStart('replay_tests%s' % version_info) _, log_path = tempfile.mkstemp(prefix='chromedriver_log_') print 'chromedriver server log: %s' % log_path cmd = [ sys.executable, os.path.join(_PARENT_DIR, 'log_replay', 'client_replay_test.py'), chromedriver, '--output-log-path=%s' % log_path ] if chrome: cmd.append('--chrome=%s' % chrome) if chrome_version: cmd.append('--chrome-version=%s' % chrome_version) code = util.RunCommand(cmd) if code: util.MarkBuildStepError() return code
def CodeGenerateImpl(fullName, lang, modules):
    """Code generate a VDM specification to C++ and verify that the expected
    generated files appear.

    Returns true when the generator exits cleanly and all files are present.
    """
    localName = util.ExtractName(fullName) + ".vdm"

    # Remove stale output first so a leftover file cannot mask a failed run.
    # The generator mangles '_' in module names to '_u' in file names.
    for moduleName in modules:
        mangled = moduleName.replace("_", "_u")
        util.DeleteFiles([mangled + "." + srcext,
                          mangled + ".h",
                          mangled + "_anonym." + srcext,
                          mangled + "_anonym.h",
                          mangled + "_userdef.h"])

    # Invoke the code generator on the local copy of the specification.
    codeGenerator = cmdline.LookUpWildCard('cpp', lang, 'impl', 'code-generator')
    (exitCode, dummy1, dummy2) = util.RunCommand(
        codeGenerator + " -c -P " + localName, 0,
        "Problem code generating specification, maybe syntax or type error in test case")
    return (exitCode == 0) and VerifyPresenceOfGeneratedFiles(fullName, modules)
def CodeGenerateImpl(fullName, lang, modules):
    """Code generate a VDM specification to Java and verify that the expected
    generated files appear.

    Returns true when the generator exits cleanly and all files are present.
    """
    localName = util.ExtractName(fullName) + ".vdm"
    # Clean out the files we expect as output from the code generation.
    for mod in modules:
        util.DeleteFiles([mod + ".java", "external_" + mod + ".java"])
    # Also drop generated quote classes and stale bytecode. The exit status
    # is intentionally ignored (the directories may simply be empty);
    # the previously unused 'exitCode0' binding has been removed.
    os.system("rm -f ./quotes/* ./*.class")
    CopyFilesForMerging(fullName, modules)
    # Run the code generator with any test-case specific options.
    interpreter = cmdline.LookUpWildCard('java', lang, 'impl', 'code-generator')
    options = convert.ParseOptionsFile(fullName)
    cmd = interpreter + options + " -j -e -P " + localName
    (exitCode, dummy1, dummy2) = util.RunCommand(
        cmd, 0,
        "Problem code generating specification, "
        "maybe syntax or type error in test case")
    ok = (exitCode == 0)
    if ok:
        ok = VerifyPresenceOfGeneratedFiles(fullName, modules)
    return ok
def RunTestCase(name, lang, type, coverageFile):
    """Run one spec-level test case through the interpreter and compare the
    produced .arg.res file with the expected result.

    Returns true on success, false otherwise.
    """
    baseName = util.ExtractName(name)
    report.setTestCaseName(name)

    # Run the interpreter. NOTE(review): its exit code is not examined here;
    # success is judged solely by the presence/content of the result file —
    # presumably because a core dump is caught by RunCommand's message.
    cmd = cmdline.LookUpWildCard('cpp', lang, 'spec', 'interpreter')
    util.RunCommand(cmd + " -D -P -a -b -R " + coverageFile, 0,
                    "Possible core dump while interpreting specification.")

    # A missing result file means the run failed earlier on.
    if not os.path.exists(baseName + ".arg.res"):
        report.Error("No result file generated for test case " + name,
                     "Maybe it failed before")
        return false

    # Compare with the expected result file, if one exists.
    resFile = resfile.FindResFile(name)
    if resFile == None:
        return false
    return resfile.CompareResult(name, baseName + ".arg.res", resFile, None)
def StripSemValue(fullName, lang, dtc):
    """Pipe <base>.arg.res through the semantic-value stripper, writing the
    plain result to <base>.res. Returns util.RunCommand's result."""
    stripper = cmdline.LookUpWildCard('ip', lang, 'spec', 'sem-backend', dtc)
    base = util.ExtractName(fullName)
    cmd = "%s < %s.arg.res > %s.res" % (stripper, base, base)
    return util.RunCommand(cmd, 0, "Error while stripping semantic values")
def ParseJavaFile(javafiles, lang):
    """Parse the given Java file(s) with the java2vdm parser.

    Returns true when the parser exits cleanly, reports no errors on stdout
    and produces the expected output file (parseRes, a module-level name).
    """
    javamain = cmdline.LookUp('java2vdm-parser')
    cmd = javamain + " " + javafiles
    (exitCode, stdout, stderr) = util.RunCommand(cmd, 0,
                                                 "Problem parsing Java File", true)
    ok = (exitCode == 0)
    # The parser may exit 0 and still report errors on stdout.
    if ok and stdout.find("Error detected") != -1:
        ok = false
    if ok:
        # Bug fix: this check was previously performed twice in a row.
        ok = VerifyPresenceOfGeneratedFiles(parseRes)
        if not ok:
            report.Error("Parser failed to produce '" + parseRes + "' ", "")
    if not ok:
        # Bug fix: the original referenced an undefined name 'testName' here,
        # raising NameError on every failure path.
        report.Error("Parser failed '" + javafiles + "' ", stdout)
    return ok
def _RunTest(java_tests_src_dir, jvm_args, sys_props, tests_report_file):
  """Runs a single JUnit test suite.

  Args:
    java_tests_src_dir: the directory to run the tests in.
    jvm_args: Java VM command line args to use.
    sys_props: Java system properties to set when running the tests.
    tests_report_file: report destination handed straight to util.RunCommand
      (exact semantics depend on util.RunCommand).
  """
  # Build the classpath from every jar bundled under <src>/jar/.
  classpath = []
  for name in glob.glob(java_tests_src_dir + "/jar/*.jar"):
    classpath.append(name)
  # Classpath separator is ';' on Windows and ':' elsewhere.
  if util.IsWindows():
    separator = ';'
  else:
    separator = ':'
  # NOTE(review): jvm_args are passed with a '-D' prefix exactly like
  # sys_props; if callers pass real JVM flags (e.g. -Xmx...) this looks
  # wrong — confirm intended contents of jvm_args.
  code = util.RunCommand(
      ['java'] +
      ['-D%s' % sys_prop for sys_prop in sys_props] +
      ['-D%s' % jvm_arg for jvm_arg in jvm_args] + [
          '-cp', separator.join(classpath), 'org.junit.runner.JUnitCore',
          'org.openqa.selenium.chrome.ChromeDriverTests'
      ], java_tests_src_dir, tests_report_file)
  # Failure is only reported, not returned, to the caller.
  if code != 0:
    print('FAILED to run java tests of ChromeDriverTests')
def CompileJavaFiles(dirName):
    """Compile the Java files of a test directory.

    If a Makefile exists in dirName it is used; otherwise all .java files
    found recursively under dirName (except MAIN.java) are compiled with
    javac (module-level global) into the current working directory.
    Returns true when the compile command exits successfully.
    """
    global javac
    mainJava = dirName + "/MAIN.java"
    makeFile = dirName + "/Makefile"
    if not os.path.exists(makeFile):
        javaFiles = ""
        # Collect every .java file except MAIN.java. (Fixed: no longer
        # shadows the builtin 'file'; iterates the pipe directly instead of
        # a manual readline() loop, and closes it afterwards.)
        fid = os.popen("ls `find " + dirName + " -type f -name \*.java`")
        for line in fid:
            srcFile = line[:-1]
            if os.path.abspath(srcFile) != os.path.abspath(mainJava):
                javaFiles = srcFile + " " + javaFiles
        fid.close()
        classpath = " -classpath $CLASSPATH:" + dirName
        cmd = javac + classpath + " -d " + os.getcwd() + " " + javaFiles
    else:
        cmd = "make -C " + dirName
    (exitCode, dummy1, dummy2) = util.RunCommand(cmd, 0, "Javac returns errors. ")
    return exitCode == 0
def RunImplTestCase(fullName, lang, posdef): ok = convert.ConvertLanguage(lang, fullName) # Remove the files we expect output in to if ok: util.RemoveTestFiles(fullName, [".arg.res"]) # Run the test if ok: interpreter = cmdline.LookUpWildCard('tc', lang, 'impl', 'interpreter', posdef) if posdef == 'pos': defFlg = "" else: defFlg = " -d " localName = util.ExtractName(fullName) + ".vdm" resName = util.ExtractName(fullName) + ".arg.res" cmd = interpreter + defFlg + " -t -f " + localName + " 2>" + resName # Now run the interpreter (exitCode, stdout, stderr) = util.RunCommand(cmd, None, None, true, true) # Ensure that the interpreter actually did type check anything. # That is in contract to if it stoped due to a syntax error or a core dump. # if re.search("Type checking [^\n]* done", stdout) == None: # report.Error("text 'type check ... done' not found on stdout while running the type checker", # "This might be due to a core dump, a syntax error or the like\n" + # "This does anyway indicate that the type checking was never done", # stdout, stderr) # ok = false if ok: expResName = resfile.FindResFile(fullName) # read the result from the result file, and translate it to a list of numbers # result = TranslateResultImpl(stdout) result = TranslateResultImpl(util.ReadFile(resName)) if result == None: ok = false if ok: if expResName == None: print("Not validating result (2)") if util.KeepFile(false): WriteResult(fullName, result) ok = false if ok: # Validate the result. report.Progress( 4, "Validating result with result file: '" + expResName + "'") ok = ValidateResult(fullName, expResName, result, stdout, stderr) if util.KeepFile(ok): WriteResult(fullName, result) return ok
def _NormalizeResult(text):
    """Collapse runs of whitespace and strip spaces around commas and
    parentheses so results can be compared layout-insensitively."""
    text = re.sub(r"\s+", " ", text)
    text = re.sub(r"\s*,\s*", ",", text)
    text = re.sub(r"\s*\(\s*", "(", text)
    text = re.sub(r"\s*\)\s*", ")", text)
    return text.strip()


def CompareResult(fullName, outputFile, resFile, interpreter, structTest=true):
    """Compare an actual result file against the expected result file.

    First a whitespace-insensitive textual comparison is tried; if that
    fails and an interpreter is available (and structTest is set), the two
    results are compared as VDM values via a generated compare
    specification. Returns true when the results match.
    """
    report.Progress(4, "Comparing result for " + repr(fullName) + " using diff method")
    actualResult = _NormalizeResult(util.ReadFile(outputFile))
    expectedResult = _NormalizeResult(util.ReadFile(resFile))
    if actualResult == expectedResult:
        return true

    # Textual comparison failed - fall back to comparing as VDM values.
    if structTest and interpreter != None:
        report.Progress(4, "Comparing result for " + repr(fullName) + " by build VDM value")
        template = util.ReadFile(setup.BaseDir + "/templates/compare-pog.vdm")
        if template == None:
            return false
        data = util.SubString('<<EXPECTED_RESULT>>', expectedResult, template)
        data = util.SubString('<<ACTUAL_RESULT>>', actualResult, data)
        if not util.WriteFile("compare.vdm", data):
            return false
        ok = util.WriteFile(".vdmtest", "compare.arg")
        util.CopyFile(setup.BaseDir + "/general-files/compare-pog.arg",
                      "compare.arg")
        cmd = interpreter + " -i compare.arg compare.vdm"
        (exitcode, dummy1, dummy2) = util.RunCommand(cmd)
        ok = (exitcode == 0)
    else:
        ok = false

    if not ok:
        report.Error(
            "Actual result is different from expected result for " + repr(fullName),
            "expected result : " + expectedResult + "\n" +
            "actual result : " + actualResult)
    if util.CleanFile(ok):
        # Bug fix: was "comapre.arg" (typo), so the file was never deleted.
        util.DeleteFiles(["compare.arg"])
    return ok
def _RunAntTest(test_dir, test_class, class_path, sys_props):
  """Runs a single Ant JUnit test suite and returns the |TestResult|s.

  Args:
    test_dir: the directory to run the tests in.
    test_class: the name of the JUnit test suite class to run.
    class_path: the Java class path used when running the tests.
    sys_props: Java system properties to set when running the tests,
      each as a 'key=value' string.

  Returns:
    A list of |TestResult| parsed from Ant's XML results file.
  """

  def _CreateBuildConfig(test_name, results_file, class_path, junit_props, sys_props):
    # Renders a minimal Ant build.xml with one "test" target that runs the
    # given suite with an XML formatter report.
    def _SystemPropToXml(prop):
      key, value = prop.split('=')
      return '<sysproperty key="%s" value="%s"/>' % (key, value)
    return '\n'.join([
        '<project>',
        ' <target name="test">',
        ' <junit %s>' % ' '.join(junit_props),
        ' <formatter type="xml"/>',
        ' <classpath>',
        ' <pathelement location="%s"/>' % class_path,
        ' </classpath>',
        ' ' + '\n '.join(map(_SystemPropToXml, sys_props)),
        ' <test name="%s" outfile="%s"/>' % (test_name, results_file),
        ' </junit>',
        ' </target>',
        '</project>'])

  def _ProcessResults(results_path):
    # Parses Ant's JUnit XML report into TestResult objects; an <error> or
    # <failure> child marks the test as failed.
    doc = minidom.parse(results_path)
    tests = []
    for test in doc.getElementsByTagName('testcase'):
      name = test.getAttribute('classname') + '.' + test.getAttribute('name')
      time = test.getAttribute('time')
      failure = None
      error_nodes = test.getElementsByTagName('error')
      failure_nodes = test.getElementsByTagName('failure')
      if len(error_nodes) > 0:
        failure = error_nodes[0].childNodes[0].nodeValue
      elif len(failure_nodes) > 0:
        failure = failure_nodes[0].childNodes[0].nodeValue
      tests += [TestResult(name, time, failure)]
    return tests

  junit_props = ['printsummary="yes"', 'fork="yes"', 'haltonfailure="no"',
                 'haltonerror="no"']
  # Write the generated build file and let Ant run the suite.
  ant_file = open(os.path.join(test_dir, 'build.xml'), 'w')
  ant_file.write(_CreateBuildConfig(
      test_class, 'results', class_path, junit_props, sys_props))
  ant_file.close()
  util.RunCommand(['ant', 'test'], cwd=test_dir)
  return _ProcessResults(os.path.join(test_dir, 'results.xml'))
def main():
  # Command-line entry point for the ChromeDriver waterfall script:
  # prepares the machine, runs the full test suite and archives results.
  parser = optparse.OptionParser()
  parser.add_option(
      '', '--android-packages',
      help='Comma separated list of application package names, '
      'if running tests on Android.')
  parser.add_option('-r', '--revision', type='int', help='Chromium revision')
  parser.add_option(
      '', '--update-log', action='store_true',
      help='Update the test results log (only applicable to Android)')
  options, _ = parser.parse_args()

  # Determine the target platform name: '<os><bitness>' or 'android'.
  # (Local 'platform' intentionally distinct from the stdlib module, which
  # this file imports as platform_module.)
  bitness = '32'
  if util.IsLinux() and platform_module.architecture()[0] == '64bit':
    bitness = '64'
  platform = '%s%s' % (util.GetPlatformName(), bitness)
  if options.android_packages:
    platform = 'android'
  if platform != 'android':
    _KillChromes()
  _CleanTmpDir()

  if platform == 'android':
    # --revision is only mandatory on Android when updating the log.
    if not options.revision and options.update_log:
      parser.error('Must supply a --revision with --update-log')
    _DownloadPrebuilts()
  else:
    if not options.revision:
      parser.error('Must supply a --revision')
    # linux64 is the configuration that archives prebuilts for the others.
    if platform == 'linux64':
      _ArchivePrebuilts(options.revision)
    _WaitForLatestSnapshot(options.revision)

  _AddToolsToPath(platform)

  cmd = [
      sys.executable,
      os.path.join(_THIS_DIR, 'test', 'run_all_tests.py'),
  ]
  if platform == 'android':
    cmd.append('--android-packages=' + options.android_packages)

  passed = (util.RunCommand(cmd) == 0)

  _ArchiveServerLogs()

  if platform == 'android':
    if options.update_log:
      util.MarkBuildStepStart('update test result log')
      _UpdateTestResultsLog(platform, options.revision, passed)
  elif passed:
    # Only good desktop builds are archived and possibly released.
    _ArchiveGoodBuild(platform, options.revision)
    _MaybeRelease(platform)
def RunImplTestCase(fullName, lang, dtc):
    """Run one POG implementation test case: execute the interpreter with -G
    and compare the produced .res file with the expected result.
    Returns true on success."""
    # Remove the files we expect output in to
    util.RemoveTestFiles(fullName, [".arg.res"])
    # Run the test
    interpreter = cmdline.LookUpWildCard('pog', lang, 'impl', 'interpreter')
    localName = util.ExtractName(fullName) + ".vdm"
    outputFile = util.ExtractName(fullName) + ".res"
    argFile = util.ExtractName(fullName) + ".arg"
    ok = SetupArgFile(fullName, lang)
    if not ok:
        return false
    # NOTE(review): dtccmd is computed here but never added to 'cmd' below —
    # looks like the -DPIQ flag is silently dropped; confirm intent.
    if dtc == 'dtcon':
        dtccmd = "-DPIQ"
    else:
        dtccmd = ""
    cmd = interpreter + " -G -O " + outputFile + " " + localName
    util.DeleteFiles([outputFile])
    # Now run the interpreter
    (exitCode, stdout, stderr) = util.RunCommand(cmd, None, None, true)
    expResultFile = resfile.FindResFile(fullName)
    if expResultFile == None:
        ok = false
    if ok:
        if exitCode != 0:
            ## Maybe a runtime error occured.
            # Note the order of the next binary expression ensures that
            # CompareRunTimeError is executed even though no expected result was
            # found! This is necesary as this function generates the result one
            # can copy in place
            ok = CompareRunTimeError(fullName, expResultFile, stdout) and ok
            return ok
    # Was output produced?
    if not os.path.exists(outputFile):
        report.Error(
            repr(cmd) + " didn't produce the expected result file: " + repr(outputFile),
            "Either command was malformed, or the interpreter crashed.")
        return false
    # Compare the produced result with the expected one using the
    # spec-compare tool as a structural fallback.
    interpreter = cmdline.LookUpWildCard('pog', lang, 'impl', 'spec-compare')
    # ok = resfile.CompareResult(fullName, outputFile, expResultFile, interpreter)
    ok = CompareResult(fullName, outputFile, expResultFile, interpreter)
    return ok
def AS2CS(asv, cs):
    """Run the backend (module-level global) to translate the AS file 'asv'
    into directory/file 'cs' via its -d option.

    Returns true exactly when the backend exits with code 0.
    """
    cmd = backend + " -d " + cs + " " + asv
    (exitCode, dummy1, dummy2) = util.RunCommand(cmd, 0, "")
    # Simplified: the original had a redundant 'if exitCode != 0: return false'
    # branch that made the computed 'ok' partially unreachable.
    return exitCode == 0
def RunSpecTestCases(fullNames, lang, coverageFile):
    """Run Java code-generator spec test cases and validate each result.

    Returns a pair (okNames, modules) where okNames is the list of passing
    test names. NOTE(review): 'modules' is whatever the LAST loop iteration
    produced (undefined for an empty fullNames) — kept for interface
    compatibility, but confirm callers really want this.
    """
    # Remove files we expect output in to.
    for fullName in fullNames:
        util.RemoveTestFiles(fullName,
                             [".arg.pt", ".arg.res", ".arg.err", ".arg.msg"])
    # Drop stale generated quote classes (exit status intentionally ignored).
    os.system("rm -f quotes/*.java")

    # Run all the test cases in one interpreter invocation.
    cmd = cmdline.LookUpWildCard('java', lang, 'spec', 'code-generator')
    util.RunCommand(cmd + " -D -a -b -R " + coverageFile, 0,
                    "Possible core dump while interpreting specification.")

    okNames = []
    # Now validate the results.
    for fullName in fullNames:
        bn = util.ExtractName(fullName)
        semResName = bn + ".arg.res"
        report.setTestCaseName(fullName)

        # Find the module or class names of the input specification.
        modules = convert.ModulesInSpecification(fullName, lang)
        ok = (modules != None)
        if ok:
            # Bug fix: this loop previously ran even when modules was None,
            # raising a TypeError instead of failing the test case cleanly.
            for mod in modules:
                util.DeleteFiles([mod + ".java", "external_" + mod + ".java"])

        # See if a result file was created.
        if ok and not os.path.exists(semResName):
            report.Error(
                "No result file generated for test case " + fullName,
                "Maybe the code generation failed for "
                "one of the previous test cases")
            ok = false

        convert.ExtractArgFileInfo(fullName)
        if ok:
            ok = ExtractSourceFiles(fullName, lang, modules)
        if ok:
            ok = CompileRunAndCompare(fullName, lang, 'spec', modules)
        if ok:
            okNames.append(fullName)
    return (okNames, modules)
def RunSpecTestCases(fullNames, lang, dtc, coverageFile):
    """Run POG spec test cases through the interpreter and validate each
    produced result (or runtime-error message) against the expected result.
    Returns the list of test names that passed."""
    # remove files we expect output in to.
    for fullName in fullNames:
        util.RemoveTestFiles(fullName, [".arg.pt", ".arg.res", ".arg.err", ".arg.msg"])
    # run the test cases
    interpreter = cmdline.LookUpWildCard('pog', lang, 'spec', 'interpreter')
    # cmd = interpreter + " -a -b -R " + coverageFile + " ../pog/test.vdm"
    cmd = interpreter + " -a -b -R " + coverageFile
    exitCode = util.RunCommand(cmd, 0, "Possible core dump while interpreting specification.")
    okNames = []
    # Now validate the results
    for fullName in fullNames:
        bn = util.ExtractName(fullName)
        semResName = bn + ".arg.res"
        resName = bn + ".res"
        errName = bn + ".arg.msg"
        report.setTestCaseName(fullName)
        # See if a result file was created
        if not os.path.exists(semResName) and not os.path.exists(errName):
            report.Error("No result or error file generated for test case " + fullName,
                         "Maybe the interpreting toolbox failed for one of the previous test cases")
            continue
        # Find expected result file (FindResFile here is the module-local
        # variant, not resfile.FindResFile — presumably equivalent; verify).
        ok = true
        expResultFile = FindResFile(fullName)
        if expResultFile == None:
            ok = false
        if os.path.exists(errName):
            # See if an error file is generated.
            data = util.ReadFile(errName)
            if data == None:
                continue
            # Note the order of the next binary expression ensures that
            # CompareRunTimeError is executed even though no expected result was
            # found! This is necesary as this function generates the result one
            # can copy in place
            ok = CompareRunTimeError(fullName, expResultFile, data) and ok
        else:
            # Strip sem values
            ok = ok and StripSemValue(fullName, lang, dtc)
            # validate expected result then
            if ok:
                interpreter = cmdline.LookUpWildCard('pog', lang, 'spec', 'spec-compare')
                ok = CompareResult(fullName, resName, expResultFile, interpreter)
        if ok:
            okNames.append(fullName)
    return okNames
def ExtractSourceFiles(fullName, lang, modules):
    """Extract the generated C++ sources embedded in the code generator's
    .arg.res output and verify they all appeared. Returns true on success."""
    resName = util.ExtractName(fullName) + ".arg.res"
    extractor = cmdline.LookUpWildCard('cpp', lang, 'spec', 'extract-source-backend')
    (exitCode, dummy1, dummy2) = util.RunCommand(
        extractor + " cpp " + resName, 0,
        "Problem extracting source files from output from C++ code generator specification")
    return (exitCode == 0) and VerifyPresenceOfGeneratedFiles(fullName, modules)
def CompileRunAndCompare(fullName, lang, type, modules):
    """Create the arg file, copy the Java standard libraries, compile the
    generated Java code, run TMAIN and compare its output with the expected
    result file. Returns true on success.

    (Removed: an unused 'global packageMap' declaration and a dead
    'lib = ""' prefix variable.)
    """
    baseName = util.ExtractName(fullName)
    ok = CreateArgFile(fullName, lang, modules)

    # Copy the Java standard-library support files next to the generated code.
    standardlibs = convert.GetStandardLibs()
    libdir = os.path.expandvars(cmdline.LookUp('java-stdlib-dir'))
    for lib in standardlibs:
        libfile = lib + ".java"
        util.CopyFile(libdir + "/" + libfile, libfile)

    if ok:
        ok = CompileJavaFiles(fullName, lang, type, modules)
    interpreter = os.path.expandvars(
        cmdline.LookUpWildCard('java', lang, type, 'interpreter'))
    if ok:
        # Execute the generated program.
        flags = os.path.expandvars(
            cmdline.LookUpWildCard('java', lang, type, 'rtflags'))
        (exitCode, stdout, stderr) = util.RunCommand(
            interpreter + " " + flags + " TMAIN", 0,
            "Error running Java " + "generated binary", true)
        print("java " + flags + " TMAIN run")
        ok = (exitCode == 0)
        if ok:
            ok = util.WriteFile(baseName + ".res", stdout)
    if ok:
        resFile = resfile.FindResFile(fullName)
        ok = (resFile != None)
        if ok:
            ok = CompareRunTimeError(fullName, resFile)
            if ok == None:
                # None means "not a runtime error": fall through to an
                # ordinary result comparison via the spec-compare tool.
                interpreter = cmdline.LookUpWildCard('java', lang, type, 'spec-compare')
                ok = resfile.CompareResult(fullName, baseName + ".res",
                                           resFile, interpreter)
    return ok
def RunPythonTests(chromedriver, chrome=None, android_package=None):
  """Run the Python test suite against chromedriver; returns the exit code."""
  util.MarkBuildStepStart('python_tests')
  test_cmd = _GenerateTestCommand('run_py_tests.py',
                                  chromedriver,
                                  chrome=chrome,
                                  android_package=android_package)
  exit_code = util.RunCommand(test_cmd)
  if exit_code:
    util.MarkBuildStepError()
  return exit_code
def RunSpecTestCases(names, lang, posdef, coverageFile):
    """Run type-checker spec test cases in one interpreter invocation, then
    translate and validate each produced .arg.res file against its expected
    result. Returns the list of test names that validated successfully."""
    # Remove files we expect output in to.
    for fullName in names:
        util.RemoveTestFiles(fullName, [".arg.pt", ".arg.res"])

    # Run all the test cases.
    interpreter = cmdline.LookUpWildCard('tc', lang, 'spec', 'interpreter', posdef)
    util.RunCommand(interpreter + " -a -b -I -D -P -R " + coverageFile, 0,
                    "Possible core dump while interpreting specification.",
                    false, true)

    okNames = []
    # Validate each test case's result in turn.
    for fullName in names:
        resName = util.ExtractName(fullName) + ".arg.res"
        report.setTestCaseName(fullName)
        # A missing result file means an earlier run failed.
        if not os.path.exists(resName):
            report.Error(
                "No result generated for test case " + fullName,
                "Maybe the interpreting toolbox failed for one of the previous test cases")
            continue
        # Translate the result file into a list of numbers.
        result = TranslateResultSpec(fullName)
        if result == None:
            continue
        # Without an expected result file we can only (optionally) keep the
        # actual result for later installation.
        expResName = resfile.FindResFile(fullName)
        if expResName == None:
            if util.KeepFile(false):
                WriteResult(fullName, result)
            continue
        report.Progress(4, "Validating result with result file: " + repr(expResName))
        ok = ValidateResult(fullName, expResName, result, None, None)
        if ok:
            okNames.append(fullName)
        if util.KeepFile(ok):
            WriteResult(fullName, result)
    return okNames
def _KillChromes():
  """Force-kill any running Chrome/Chromium processes on this platform."""
  process_names = {
      'win': 'chrome.exe',
      'mac': 'Chromium',
      'linux': 'chrome',
  }
  if util.IsWindows():
    kill_cmd = ['taskkill', '/F', '/IM']
  else:
    kill_cmd = ['killall', '-9']
  kill_cmd.append(process_names[util.GetPlatformName()])
  util.RunCommand(kill_cmd)
def RunJavaTests(chromedriver, chrome=None, android_package=None,
                 verbose=False):
  """Run the Java test suite against chromedriver; returns the exit code."""
  util.MarkBuildStepStart('java_tests')
  test_cmd = _GenerateTestCommand('run_java_tests.py',
                                  chromedriver,
                                  chrome=chrome,
                                  android_package=android_package,
                                  verbose=verbose)
  exit_code = util.RunCommand(test_cmd)
  if exit_code:
    util.MarkBuildStepError()
  return exit_code
def _CallMethod(self, method_type, method_name, infilename, outfilename,
                identifier=None, dbname=None):
    """Build the external command for the given method, run it through the
    shell, and return the (possibly adjusted) output filename."""
    command, resolved_outfilename = self._GetCommand(
        method_type, method_name, infilename, outfilename, identifier, dbname)
    util.RunCommand(command, shell=True, qHideOutput=False)
    return resolved_outfilename
def Astral(tree_dir, astral_jar_file, qForOF=False):
    """Run ASTRAL over all gene trees found under tree_dir and return the
    inferred species tree as a tree.Tree.

    qForOF: when set, run the java command through util.RunCommand
    (OrthoFinder's runner) instead of subprocess.call.
    """
    # NOTE(review): plain string concatenation — if tree_dir lacks a trailing
    # separator this yields e.g. "trees../TreesFile.txt"; confirm callers
    # always pass a trailing '/'.
    treesFN = tree_dir + "../TreesFile.txt"
    # Concatenate every gene tree (newick format 9: leaf names only) into a
    # single input file for ASTRAL. ('wb' + str writes: Python 2 semantics.)
    with open(treesFN, 'wb') as outfile:
        for fn in glob.glob(tree_dir + "/*"):
            t = tree.Tree(fn)
            outfile.write(t.write(format=9) + "\n")
    speciesTreeFN = tree_dir + "../SpeciesTree_ids.txt"
    command = " ".join(["java", "-Xmx6000M", "-jar", astral_jar_file, "-i", treesFN, "-o", speciesTreeFN])
    if qForOF:
        import util
        util.RunCommand(command, True, True, True)
    else:
        subprocess.call(command, shell=True)
    return tree.Tree(speciesTreeFN)
def RunSpecTestCases(fullNames, lang, coverageFile):
    """Run C++ code-generator spec test cases in one interpreter invocation,
    then extract, compile, run and compare each test's generated sources.
    Returns the list of test names that passed."""
    # remove files we expect output in to.
    for fullName in fullNames:
        util.RemoveTestFiles(fullName, [".arg.pt", ".arg.res", ".arg.err", ".arg.msg"])
    # run the test cases
    #cmd = "LD_LIBRARY_PATH=/usr/local/omniORB/lib "
    #cmd = cmd + cmdline.LookUpWildCard('cpp', lang, 'spec', 'interpreter')
    cmd = cmdline.LookUpWildCard('cpp', lang, 'spec', 'interpreter')
    cmd = cmd + " -D -P -a -b -R " + coverageFile
    exitCode = util.RunCommand(
        cmd, 0, "Possible core dump while interpreting specification.")
    okNames = []
    # Now validate the results
    for fullName in fullNames:
        bn = util.ExtractName(fullName)
        semResName = bn + ".arg.res"
        # NOTE(review): resName is assigned but never used in this function.
        resName = bn + ".res"
        report.setTestCaseName(fullName)
        ok = true
        if ok:
            # Find the module or class names of the input specification.
            modules = convert.ModulesInSpecification(fullName, lang)
            if modules == None:
                ok = false
        # See if a result file was created
        if ok:
            if not os.path.exists(semResName):
                report.Error(
                    "No result file generated for test case " + fullName,
                    "Maybe the code generation failed for one of the previous test cases"
                )
                ok = false
        if ok:
            ok = ExtractSourceFiles(fullName, lang, modules)
        if ok:
            ok = CompileRunAndCompare(fullName, lang, 'spec', modules)
        if ok:
            okNames.append(fullName)
    return okNames
def ProcessTrees(dir_in, dir_matrices, dir_trees_out, GeneToSpecies, qVerbose=True, qSkipSingleCopy=False, qForOF=False):
    """Re-estimate species-labelled trees from every gene tree in dir_in.

    For each readable gene tree containing all species, a phylip distance
    matrix is written to dir_matrices and fastme is run to produce a tree in
    dir_trees_out. Single-copy trees can be relabelled directly instead
    (qSkipSingleCopy). Counters nSuccess/nNotAllPresent/nFail are maintained
    but not returned here — presumably reported by code outside this view.
    (Python 2 code: except-comma syntax, xrange, list-returning map.)
    """
    nSp = GeneToSpecies.NumberOfSpecies()
    s_to_i = GeneToSpecies.SpeciesToIndexDict()
    nSuccess = 0
    nNotAllPresent = 0
    nFail = 0
    if qVerbose: print("\nProcessing gene trees:")
    for fn in glob.glob(dir_in + "/*"):
        # Skip files ETE cannot parse as newick.
        try:
            t = tree.Tree(fn)
        except newick.NewickError:
            print(os.path.split(fn)[1] + " - WARNING: ETE could not interpret tree file, it will be ignored")
            nFail += 1
            continue
        # Map each gene name to its species; unknown genes abort this tree.
        try:
            genes = t.get_leaf_names()
            species = map(GeneToSpecies.ToSpecies, genes)
        except UnrecognisedGene, e:
            print(os.path.split(fn)[1] + " - WARNING: unrecognised gene, %s" % e.message)
            nFail += 1
            continue
        # Only trees containing every species are processed further.
        nThis = len(set(species))
        if nThis != nSp:
            # print(os.path.split(fn)[1] + " - Only %d species, skipping" % nThis)
            nNotAllPresent += 1
            continue
        if qSkipSingleCopy and nThis == len(genes):
            # Single copy - don't recalculate the tree; just relabel leaves
            # with species indices and write it out (newick format 5).
            treeOutFN = dir_trees_out + os.path.split(fn)[1] + ".tre"
            for n in t:
                n.name = s_to_i[GeneToSpecies.ToSpecies(n.name)]
            t.write(outfile = treeOutFN, format=5)
            if qVerbose: print(os.path.split(fn)[1] + " - Processed")
            continue
        # Build the gene->species-index map and the distance matrix, then
        # let fastme infer a tree from the matrix.
        g_to_i = {g:s_to_i[s] for g,s in zip(genes, species)}
        D = GetDistances_fast(t, nSp, g_to_i)
        species_names_fastme = map(str,xrange(nSp))
        matrixFN = dir_matrices + os.path.split(fn)[1] + ".dist.phylip"
        treeOutFN = dir_trees_out + os.path.split(fn)[1] + ".tre"
        WritePhylipMatrix(D, species_names_fastme, matrixFN, max_og=1e6)
        command = "fastme -i %s -o %s -w O -s -n" % (matrixFN, treeOutFN)
        if qForOF:
            import util
            util.RunCommand(command, True, True, True)
        else:
            # Output is captured and discarded; only the tree file matters.
            popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            popen.communicate()
        nSuccess += 1
        if qVerbose: print(os.path.split(fn)[1] + " - Processed")
def _RemoveFile(path, label):
    """Delete 'path' if it exists; report (using 'label' in the message) and
    return false on failure, true otherwise."""
    if os.path.exists(path):
        try:
            os.unlink(path)
        except os.error:
            _, (_, msg), _ = sys.exc_info()
            report.Error("Error while removing " + label + ": " + msg)
            return false
    return true


def VDM2AST(fileName, parser, typeCheck):
    """Convert a VDM test case to an AST file by running the parser.

    fileName  -- test case path; <base>.vdm is parsed, <base>.ast produced.
    parser    -- parser executable to run.
    typeCheck -- when set, run with -ta (type check) instead of -pa (parse).
    Returns true on success. The parser leaves its output in 'm4pp', which
    is then moved to the .ast file.
    """
    base = util.ExtractName(fileName)
    vdmFile = base + ".vdm"
    astFile = base + ".ast"
    report.Progress(4, "Converting testcase from VDM to AST (creating " + astFile + ")")

    # First delete any stale m4pp output file.
    if not _RemoveFile("m4pp", "'m4pp'"):
        return false

    # Run the parser.
    if typeCheck:
        cmd = parser + " -ta " + vdmFile
    else:
        cmd = parser + " -pa " + vdmFile
    (exitCode, stdout, stderr) = util.RunCommand(
        cmd, 0, "Error in input file (" + vdmFile + "), possible syntax error")
    if exitCode != 0:
        return false

    # Verify that an m4pp file has been created.
    if not os.path.exists("m4pp"):
        report.Error("command '" + cmd + "' didn't produce a m4pp file, though return code was 0",
                     "Command may not be a vdmde command with -pa flag")
        return false

    # Finally move m4pp to 'astFile' (removing any previous astFile first).
    if not _RemoveFile(astFile, astFile):
        return false
    try:
        os.rename('m4pp', astFile)
    except os.error:
        _, (_, msg), _ = sys.exc_info()
        report.Error("Couldn't move file 'm4pp' to " + astFile + ": " + msg)
        return false
    return true
def CompileJavaFiles(fullName, lang, type, modules):
    """Compile TMAIN.java together with all generated module .java files.

    Returns true exactly when compilation succeeds and TMAIN.class exists.
    (Removed: an unused 'baseName' local and a leftover debug print.)
    """
    # Find the compiler to use.
    compiler = os.path.expandvars(
        cmdline.LookUpWildCard('java', lang, type, 'compiler'))

    # Figure out the names of all the Java files to compile.
    javaFiles = "TMAIN.java"
    for mod in modules:
        if os.path.exists(mod + ".java"):
            javaFiles = javaFiles + " " + mod + ".java"
        else:
            # NOTE(review): this fallback uses convert.GetModCls(), which does
            # not depend on 'mod' — confirm that is intended.
            package = convert.GetModCls()
            packageStrings = package.split('.')
            packageDir = util.join(packageStrings, '/')
            if os.path.exists(packageDir + ".java") and not packageDir in modules:
                javaFiles = javaFiles + " " + packageDir + ".java"
        if os.path.exists("external_" + mod + ".java"):
            javaFiles = javaFiles + " external_" + mod + ".java"

    # Find the flags for the compiler.
    flags = os.path.expandvars(
        os.path.expanduser(cmdline.LookUpWildCard('java', lang, type, 'cflags')))

    # Delete the old binary first so success can be detected reliably.
    util.DeleteFiles(["TMAIN.class"])

    # Build the command and execute it.
    cmd = compiler + " -d . " + flags + " " + javaFiles
    (exitCode, dummy1, dummy2) = util.RunCommand(cmd, 0,
                                                 "Problem when compiling generated code")
    ok = (exitCode == 0)
    if ok:
        if not os.path.exists("TMAIN.class"):
            report.Error(
                "TMAIN.class was not created as a result of compiling the generated Java files"
            )
            return false
    return ok
def RunJavaTests(chromedriver, chrome=None, chrome_version=None,
                 chrome_version_name=None, android_package=None):
  """Run the Java test suite against chromedriver; returns the exit code."""
  suffix = '(v%s)' % chrome_version_name if chrome_version_name else ''
  util.MarkBuildStepStart('java_tests%s' % suffix)
  test_cmd = _GenerateTestCommand('run_java_tests.py',
                                  chromedriver,
                                  ref_chromedriver=None,
                                  chrome=chrome,
                                  chrome_version=chrome_version,
                                  android_package=android_package)
  exit_code = util.RunCommand(test_cmd)
  if exit_code:
    util.MarkBuildStepError()
  return exit_code