def _compile_fortpyf90(self, tversion):
    """Compiles a fortpy.mod and fortpy.o for the current compiler.

    :arg tversion: the fortpy template version tuple; its dotted form is
      written to the '.v' sidecar files next to the renamed binaries.
    """
    msg.info("Compiling fortpy.mod and fortpy.f90 for {}".format(self.compiler))
    from os import waitpid, path
    from subprocess import Popen, PIPE
    from fortpy.testing.compilers import executor, replace

    #Run the compiler in the templates directory: once plain and once with
    #-c so that both fortpy.mod and fortpy.o get produced.
    command = "cd {0}; {1} fortpy.f90; {1} -c fortpy.f90".format(
        self.fortpy_templates, executor(self.compiler))
    pcompile = Popen(command, shell=True, executable="/bin/bash",
                     stdout=PIPE, stderr=PIPE)
    waitpid(pcompile.pid, 0)

    objpath = path.join(self.fortpy_templates, "fortpy.o")
    modpath = path.join(self.fortpy_templates, "fortpy.mod")
    if not (path.isfile(objpath) and path.isfile(modpath)):
        msg.err("Unable to generate fortpy.o and fortpy.mod.")
        return

    from shutil import move
    #Rename the artifacts so that each compiler keeps its own copies.
    cobjpath = path.join(self.fortpy_templates, replace("fortpy.o.[c]", self.compiler))
    cmodpath = path.join(self.fortpy_templates, replace("fortpy.mod.[c]", self.compiler))
    move(objpath, cobjpath)
    move(modpath, cmodpath)
    #Create the version files so we can keep track of the compiled versions.
    for vpath in (cobjpath + ".v", cmodpath + ".v"):
        with open(vpath, 'w') as f:
            f.write('#<fortpy version="{}" />'.format('.'.join(map(str, tversion))))
def _get_casepath(wizard):
    """Returns the full path to the execution directory for the current
    test and case.

    :arg wizard: the wizard object carrying the staging directory, the
      active executable/test/case selections and the compiler name.
    """
    from fortpy.testing.compilers import replace
    xstage = path.join(wizard.stagedir, wizard.xauto.full_name)
    #Each compiler gets its own copy of the staged execution directory.
    target = replace(xstage + ".[c]", wizard.compiler)
    #BUG FIX: the condition was inverted ('is None'), which concatenated
    #'.' + None (TypeError) when no case was active and silently dropped
    #the case suffix when one was. Only append the suffix for a real case.
    tfolder = "{}{}".format(wizard.tauto.identifier,
                            '.' + wizard.caseauto if wizard.caseauto is not None else "")
    return path.join(target, "tests", tfolder)
def generate(parser, coderoot, stagedir, compiler=None, debug=False, profile=False, strict=False, docompile=True):
    """Generates the f90 module file for all members referenced in the parser's modules list.

    :arg parser: code parser whose modules are scanned for members needing
      auxiliary save/read support.
    :arg coderoot: path to the root of the code library being tested.
    :arg stagedir: staging directory; a 'fortpy.aux' sub-directory is created
      there when `docompile` is True.
    :arg compiler: name of the compiler (from 'compilers.xml') used for the
      compiler-specific target path and the compile step.
    :arg debug, profile: flags forwarded to the compile step.
    :arg strict: forwarded to the compile pipeline via generate's caller contract.
    :arg docompile: when True, the generated fpy_auxiliary.f90 is staged and
      compiled (returning the compile result tuple); when False it is only
      written into `coderoot` and the function returns None.
    """
    import fortpy
    from fortpy.utility import get_fortpy_templates_dir
    from fortpy.testing.elements import AutoClasser
    #Collect every member referenced by the parser's modules that requires
    #auto-class handling.
    members = _find_members(parser, coderoot)
    #NOTE(review): 'folder' is a hard-coded placeholder string; presumably
    #AutoClasser only uses it for naming — confirm it is never treated as a
    #real filesystem path.
    folder = "filename"
    classers = [AutoClasser(m, folder, m.name, [], coderoot, True) for m in members]
    from os import path
    templates = get_fortpy_templates_dir()
    #Read the static fpy_auxiliary.f90 template whose __*__ placeholders are
    #substituted below.
    statpath = path.join(templates, "fpy_auxiliary.f90")
    with open(statpath) as f:
        static = f.read()
    from fortpy.printing.formatting import present_params
    #First pass generates the "save" direction code for all classers.
    xnames, modcode, routines, modules = _generate_single(classers)
    from fortpy.code import order_module_dependencies
    modules = order_module_dependencies(modules, parser)
    from os import mkdir, path
    if docompile:
        #Stage the auxiliary module in its own sub-directory of the stage.
        auxdir = path.join(stagedir, "fortpy.aux")
        if not path.isdir(auxdir):
            mkdir(auxdir)
    else:
        auxdir = coderoot
    if docompile and not _should_recompile(auxdir, parser, modules, compiler):
        #Nothing relevant changed since the last compile; reuse the existing
        #compiler-specific target directory.
        msg.okay("The current version of fpy_auxiliary.f90 is up-to-date.")
        from fortpy.testing.compilers import replace
        target = replace(auxdir + ".[c]", compiler)
        return (0, True, target)
    #Substitute the "save" direction placeholders in the template.
    static = static.replace("__auxsave__", present_params(xnames, 21))
    static = static.replace("__aux_uses__", '\n'.join(modcode))
    static = static.replace("__version__", fortpy.__version__)
    static = static.replace("__fxauxsave__", '\n'.join(routines))
    #Second pass regenerates the code for the "read" direction.
    xnames, modcode, routines, dmods = _generate_single(classers, False)
    static = static.replace("__auxread__", present_params(xnames, 21))
    static = static.replace("__fxauxread__", '\n'.join(routines))
    fortpath = path.join(auxdir, "fpy_auxiliary.f90")
    with open(fortpath, 'w') as f:
        f.write(static)
    if docompile:
        _prepare_dir(parser, modules, auxdir)
        return _compile(auxdir, compiler, debug, profile)
def runall(self, compiler=None): """Compiles and runs each new or modified unit test executable. After the run is complete, the outcomes are checked for consistency. :arg compiler: the name of the compiler in 'compilers.xml' to use. """ #We will likely need a file comparer for the outcomes testing self.comparer = FileComparer(self.fortpy_templates, self.compare_templates) if self._written: self.set_compiler(compiler) from fortpy.testing.compilers import replace from fortpy.testing.auxiliary import generate from fortpy.utility import symlink from fortpy.code import config #Run them each individually and return a dictionary of all the #test results result = {} fpyauxs = [] for composite_id in self.tgenerator.tests_to_run: identifier, testid = composite_id.split("|") #Compile and copy fpy_auxiliary if it isn't in the identifiers directory yet. source = path.join(self.libraryroot(identifier), identifier) target = replace(source + ".[c]", self.compiler) if not path.isdir(target): from os import mkdir mkdir(target) if self.writer( identifier).autoclass and identifier not in fpyauxs: code, success, fpytarget = generate( self.parser, self._codefolder, self.libraryroot(identifier), self.compiler, self.debug, self.profile, self.strict) sopath = path.join(fpytarget, "fpy_aux.so") sotarget = path.join(target, "fpy_aux.so") mpath = path.join(fpytarget, "fpy_auxiliary.mod") mtarget = path.join(target, "fpy_auxiliary.mod") symlink(sopath, sotarget) symlink(mpath, mtarget) fpyauxs.append(identifier) oneresult = self._run_single(identifier, testid, source) if oneresult is not None: result[composite_id] = oneresult return result else: msg.warn( "you can't run tests until the executables have been written. Exiting." ) return None
def runall(self, compiler=None):
    """Compiles and runs each new or modified unit test executable, then
    checks the outcomes for consistency once the runs complete.

    :arg compiler: the name of the compiler in 'compilers.xml' to use.
    """
    #A file comparer will likely be needed for the outcomes testing.
    self.comparer = FileComparer(self.fortpy_templates, self.compare_templates)
    if not self._written:
        msg.warn("you can't run tests until the executables have been written. Exiting.")
        return None

    self.set_compiler(compiler)
    from fortpy.testing.compilers import replace
    from fortpy.testing.auxiliary import generate
    from fortpy.utility import symlink
    from fortpy.code import config

    #Run each test individually and collect every outcome in a dictionary.
    results = {}
    #Identifiers whose fpy_auxiliary artifacts have already been generated
    #and linked; generate() only needs to run once per identifier.
    linked_auxs = []
    for composite_id in self.tgenerator.tests_to_run:
        identifier, testid = composite_id.split("|")
        #Compile and copy fpy_auxiliary if it isn't in the identifier's
        #compiler-specific directory yet.
        source = path.join(self.libraryroot(identifier), identifier)
        target = replace(source + ".[c]", self.compiler)
        if not path.isdir(target):
            from os import mkdir
            mkdir(target)
        if self.writer(identifier).autoclass and identifier not in linked_auxs:
            code, success, auxtarget = generate(self.parser, self._codefolder,
                                                self.libraryroot(identifier),
                                                self.compiler, self.debug,
                                                self.profile, self.strict)
            #Link the shared library and module file into the target folder.
            for artifact in ("fpy_aux.so", "fpy_auxiliary.mod"):
                symlink(path.join(auxtarget, artifact),
                        path.join(target, artifact))
            linked_auxs.append(identifier)
        single = self._run_single(identifier, testid, source)
        if single is not None:
            results[composite_id] = single
    return results
def _run_compile(self, identifier, testid):
    """Compiles the executable that was created for the specified identifier,
    returns True if the compile was successful.

    :arg identifier: the identifier of the module whose tests are being run.
    :arg testid: the test identifier; selects 'Makefile.{testid}' and names
      the resulting '{testid}.x' executable.
    :returns: tuple of (success boolean, compiler-specific target directory).
    """
    #Because we often run the tests for multiple compiler versions, we need
    #a copy of the execution directory that was setup for the testing.
    from fortpy.testing.compilers import replace, executor, family
    from fortpy.utility import copytree
    from os import path
    source = path.join(self.libraryroot(identifier), identifier)
    target = replace(source + ".[c]", self.compiler)
    copytree(source, target)

    #Before we compile, we need to make sure we have the fortpy.o and fortpy.mod
    #files for the specific compiler.
    tversion = self.template_version("fortpy.f90")
    for sdfile in ["fortpy.o", "fortpy.mod"]:
        fdfile = replace(sdfile + ".[c]", self.compiler)
        ftarget = path.join(target, sdfile)
        dversion = self.get_fortpy_version(ftarget)
        if not path.isfile(ftarget) or dversion != tversion:
            from shutil import copy
            #Use a distinct name so we don't clobber the execution-directory
            #'source' path computed above (the original shadowed it).
            spath = path.join(self.fortpy_templates, fdfile)
            sversion = self.get_fortpy_version(spath)
            if not path.isfile(spath) or sversion != tversion:
                self._compile_fortpyf90(tversion)
            msg.info("   COPY: {}".format(spath))
            copy(spath, ftarget)
            #If the file is a binary, we need to save a .v with version
            #information as well for the next time we want to copy it.
            pre, ext = path.splitext(ftarget)
            if ext in [".o", ".so", ".mod"]:
                with open(ftarget + '.v', 'w') as f:
                    #NOTE(review): this writes "# <fortpy ..." while
                    #_compile_fortpyf90 writes "#<fortpy ..." (no space);
                    #confirm get_fortpy_version parses both forms.
                    f.write("# <fortpy version=\"{}\" />".format('.'.join(map(str, tversion))))

    #Find the target folder that has the executables etc then run
    #make and check the exit code.
    msg.blank()
    options = ""
    if self.debug:
        options += " DEBUG=true"
    if self.profile:
        options += " GPROF=true"
    codestr = "cd {}; make -f 'Makefile.{}' F90='{}' FAM='{}'" + options
    #If we are running in quiet mode, we don't want the compile information
    #to post to stdout; only errors should be redirected. This means we need
    #to wrap the execution in a subprocess and redirect the std* to PIPE
    from os import waitpid
    from subprocess import Popen, PIPE
    command = codestr.format(target, testid, executor(self.compiler), family(self.compiler))
    pcompile = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
    waitpid(pcompile.pid, 0)

    if not self.quiet:
        output = [x.decode('utf8') for x in pcompile.stdout.readlines()]
        msg.std(''.join(output))
    #else: #We don't need to get these lines since we are purposefully redirecting them.
    #BUG FIX: stderr lines are bytes in python 3; the original passed them
    #undecoded to ''.join(), raising a TypeError whenever errors existed.
    error = [x.decode('utf8') for x in pcompile.stderr.readlines()]
    code = len(error)
    if code != 0:
        msg.err(''.join(error))

    #It turns out that the compiler still returns a code of zero, even if the compile
    #failed because the actual compiler didn't fail; it did its job properly. We need to
    #check for the existence of errors in the 'compile.log' file.
    lcount = 0
    errors = []
    log = path.join(target, "compile.log")
    with open(log) as f:
        for line in f:
            lcount += 1
            #There are 21 lines in the compile.log file when everything runs
            #correctly; capture (up to 10 of) any extra lines as errors.
            if lcount > 21 and lcount < 32:
                errors.append(line)
            elif lcount > 21:
                break

    if len(errors) > 0:
        #Overwrite code with a bad exit value since we have some other problems.
        code = 1
        #We want to write the first couple of errors to the console and give them the
        #option to still execute if the compile only generated warnings.
        msg.warn("compile generated some errors or warnings:")
        msg.blank()
        msg.info(''.join(errors))
        #If the executable exists, we could still prompt them to run it (in case the
        #additional lines were just warnings).
        exe = path.join(target, "{}.x".format(testid))
        if path.isfile(exe):
            choice = input("\nWould you still like to run the executable? ").lower()
            code = 0 if "y" in choice else code
            if "n" in choice:
                msg.err("Unit testing terminated by user.")
                exit(0)
        else:
            msg.err("Could not compile executable {}.x".format(testid))
            exit(-1)

    return code == 0, target