Example #1
    def _execute_testpath(self, testspec, testwriter, testpath, exepath, result, 
                          tester, caseid, case=""):
        """Executes the unit test in the specified testing folder for 'case'."""
        if not path.exists(testpath):
            mkdir(testpath)

        #Copy across all the input files we need to run.
        for i in testspec.inputs:
            i.copy(self._codefolder, testpath, case, self.compiler)
        #Also copy any assignment file dependencies.
        testwriter.copy(self._codefolder, testpath, case, testspec.identifier, self.compiler)
        #Clean the testing folder to remove any target variable output files
        #from any earlier test runs.
        testspec.clean(testpath)
        testwriter.setup(testpath)

        #If the testspec needs auto-class support, write the case to file.
        if testspec.autoclass:
            with open(path.join(testpath, "fpy_case"), 'w') as f:
                f.write('"{}"'.format(case))
        
        #Save the path to the folder for execution in the result.
        result.paths[caseid] = testpath

        msg.okay("Executing {}.x in folder ./tests{}".format(testspec.identifier, 
                                                             testpath.split("tests")[1]))
        start_time = clock()                              
        from os import waitpid
        from subprocess import Popen, PIPE
        command = "cd {}; {}".format(testpath, exepath)
        prun = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
        waitpid(prun.pid, 0)        
        if not self.quiet:
            output = prun.stdout.readlines()
            if len(output) > 0:
                msg.std(''.join(output))
        #else: #We don't need to get these lines since we are purposefully redirecting them.
        error = prun.stderr.readlines()
        if len(error) > 0:
            if self.quiet:
                msg.info("With Executable at {}".format(exepath), 1)
            msg.err('\n  '+'  '.join(error))
        code = len(error)
        
        if case == "":
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester)
        else:
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester, case)
        self._write_success(testpath, code)

        #See if we need to run the post-execution profiling
        if self.profile:
            profiling.profile(testpath, testspec.testgroup.method_fullname, 
                              exepath, self.compiler)
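A stripped-down sketch (not part of fortpy) of the run-and-capture pattern that _execute_testpath uses above: change into the staging folder, run the generated executable through a bash shell, and collect stdout/stderr. It substitutes Popen.communicate() for the waitpid call; the helper name run_in_folder is illustrative only.

from subprocess import Popen, PIPE

def run_in_folder(testpath, exepath):
    """Run 'exepath' inside 'testpath' and return (stdout_lines, stderr_lines)."""
    command = "cd {}; {}".format(testpath, exepath)
    prun = Popen(command, shell=True, executable="/bin/bash",
                 stdout=PIPE, stderr=PIPE)
    out, err = prun.communicate()  # waits for the child process to exit
    return out.decode("utf8").splitlines(), err.decode("utf8").splitlines()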
Example #2
    def _finalize_testpath(self, result, caseid, testpath, exepath, testspec, code,
                           start_time, tester, case=""):
        """Cleans up and sets results for the test that ran.
        """
        if case == "":
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester)
        else:
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester, case)
        self._write_success(testpath, code)

        #See if we need to run the post-execution profiling
        if self.profile:
            profiling.profile(testpath, testspec.testgroup.method_fullname, 
                              exepath, self.compiler)
Example #3
class UnitTester(object):
    """Performs automatic unit testing of individual subroutines and
    functions based on XML doctags found in the code files.

    :arg libraryroot: the path to the folder in which to stage the tests.
    """
    def __init__(self, libraryroot=None, verbose=False, compare_templates=None,
                 fortpy_templates=None, rerun=None, compiler=None,
                 debug=False, profile=False, quiet=False):
        self.parser = CodeParser()
        self.parser.verbose = verbose
        from fortpy.utility import set_fortpy_templates
        set_fortpy_templates(self, fortpy_templates)
        self.tgenerator = TestGenerator(self.parser, libraryroot, self.fortpy_templates, self, rerun)
        self.compiler = None
        """The full path to the compiler to use for the unit tests.
        """
        self.quiet = quiet
        """Specifies whether the tester should run in quiet mode, which only prints
        essential information for the unit tests to stdout.
        """
        self.debug = debug == True
        self.set_compiler(compiler)
        self.profile = self._profiler_exists(profile)
        
        #A flag to track whether the generator has already written
        #the executables.
        self._written = False
        #The user's raw value for the compare_templates directory
        self._compare_templates = compare_templates
        self._templatev = {}
        """Holds the version number of the fortpy.f90 file."""
        
    def template_version(self, filename):
        """Returns the version number of the latest fortpy.f90 file."""
        if filename not in self._templatev:
            from os import path
            from fortpy.utility import get_fortpy_templates_dir
            tempath = path.join(get_fortpy_templates_dir(), filename)
            self._templatev[filename] = self.get_fortpy_version(tempath)

        return self._templatev[filename]

    def get_fortpy_version(self, fortpath, recursed=False):
        """Gets the fortpy version number from the first line of the specified file."""
        result = []
        #If the file doesn't exist yet, we don't try to find the version information.
        from os import path
        if not path.isfile(fortpath) or path.splitext(fortpath)[1] in [".o", ".mod"]:
            if path.isfile(fortpath + '.v'):
                return self.get_fortpy_version(fortpath + '.v', True)
            else:
                return result

        #Only the first line can carry the version tag; default to empty so an
        #empty file doesn't raise a NameError below.
        vxml = ""
        with open(fortpath) as f:
            for line in f:
                try:
                    lt = line.index("<")
                    vxml = "<doc>{}</doc>".format(line[lt::])
                except ValueError:
                    vxml = ""
                break
            
        if "<fortpy" in vxml:
            import xml.etree.ElementTree as ET
            x = list(ET.XML(vxml))
            if len(x) > 0:
                result = list(map(int, x[0].attrib["version"].split(".")))

        if len(result) == 0 and not recursed:
            return self.get_fortpy_version(fortpath + '.v', True)
        else:
            return result

    def tests(self, identifier):
        """Returns a dictionary of all the tests that need to be run for the
        specified module.executable identifier."""
        if identifier in self.tgenerator.xtests:
            return self.tgenerator.xtests[identifier]
        else:
            return {}

    def writer(self, identifier):
        """Returns the underlying executable writer that has a list of ordered
        method/assignment dependencies."""
        if identifier in self.tgenerator.xwriters:
            return self.tgenerator.xwriters[identifier]
        
    def libraryroot(self, identifier):
        """Returns the absolute path to the staging directory for the unit tests with
        the specified testid.
        """
        if identifier in self.tgenerator.xgenerator.folders:
            return self.tgenerator.xgenerator.folders[identifier]

    def _profiler_exists(self, profile):
        """Tests whether we have gprof available to do the profiling of the methods
        to unit test.
        
        :arg profile: the value specified in the UnitTester constructor.
        """
        if profile == True:
            gprof = self.which("gprof")
            if gprof is None:
                msg.err("gprof is required to run profiling with fortpy.")
                exit(1)
            else:
                return True
        else:
            return False

    def set_compiler(self, compiler):
        """Sets the compiler to use for the unit testing of this code parser.
        """
        if compiler is not None:
            self.compiler = compiler
            self._compiler_exists()

    def _compiler_exists(self):
        """Tests whether the specified compiler is available on the machine. If
        it isn't, give an error and exit."""
        from fortpy.testing.compilers import compilers
        if self.compiler in compilers:
            #Overwrite the *name* of the compiler with its full path; since
            #fortpy assumes that self.compiler is the name of a valid executable
            #this will still work correctly.
            compiler = compilers[self.compiler].path
        else:
            compiler = self.compiler

        if self.which(compiler) is None:
            msg.err("compiler {} not found. Exiting.".format(self.compiler))
            exit(1)
        
    def which(self, program):
        """Tests whether the specified program is anywhere in the environment
        PATH so that it probably exists."""
        import os
        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, fname = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file

        return None

    def writeall(self, codefolder):
        """Writes all the unit test executables that are new or modified
        as well as the makefiles for all subroutines in all modules."""
        #The test generator already loops over all modules in the code
        #parser and does all the heavy-lifting. We need to pre-load any
        #modules that we are interested in testing. Only those loaded
        #when write() is first called will have their tests processed.
        self._codefolder = path.abspath(codefolder)
        if self._compare_templates is not None:
            self.compare_templates = path.abspath(self._compare_templates)
        else:
            self.compare_templates = path.join(self._codefolder, "templates/")

        #We will load all the modules in the code folder specified and
        #then run the test generator.
        files = {}
        self.parser.scan_path(self._codefolder, files)
        for f in files:
            filepath = files[f]
            self.parser.parse(filepath, True, True)

        #Now that we have loaded all the codefiles in the path, we can
        #generate the unittest executables
        self.tgenerator.write(self._codefolder)
        self._written = True

    def runall(self, compiler=None):
        """Compiles and runs each new or modified unit test executable.
        After the run is complete, the outcomes are checked for consistency.

        :arg compiler: the name of the compiler in 'compilers.xml' to use.
        """
        #We will likely need a file comparer for the outcomes testing
        self.comparer = FileComparer(self.fortpy_templates, self.compare_templates)

        if self._written:
            self.set_compiler(compiler)
                
            #Run them each individually and return a dictionary of all the
            #test results
            result = {}
            for composite_id in self.tgenerator.tests_to_run:
                identifier, testid = composite_id.split("|")
                oneresult = self._run_single(identifier, testid)
                if oneresult is not None:
                    result[composite_id] = oneresult

            return result
        else:
            msg.warn("you can't run tests until the executables have been written. Exiting.")
            return None
       
    def _run_single(self, identifier, testid):
        """Runs all unit test cases for the specified identifier."""
        #Just initialize a result object and populate its properties
        #using the various _run_* methods.
        result = TestResult(identifier, testid)
        result.compiled, result.target = self._run_compile(identifier, testid)
        if result.compiled:
            self._run_exec(identifier, testid, result)

        if self.tests(identifier)[testid].runchecks:
            #Only return a test result if the checks were actually run.
            return result

    def _compile_fortpyf90(self, tversion):
        """Compiles a fortpy.mod and fortpy.o for the current compiler.
        """
        msg.info("Compiling fortpy.mod and fortpy.f90 for {}".format(self.compiler))
        from os import waitpid, path
        from subprocess import Popen, PIPE
        from fortpy.testing.compilers import executor, replace
        command = "cd {0}; {1} fortpy.f90; {1} -c fortpy.f90".format(self.fortpy_templates, executor(self.compiler))
        pcompile = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
        waitpid(pcompile.pid, 0)

        opath = path.join(self.fortpy_templates, "fortpy.o")
        mpath = path.join(self.fortpy_templates, "fortpy.mod")
        if path.isfile(opath) and path.isfile(mpath):
            from shutil import move
            nopath = path.join(self.fortpy_templates, replace("fortpy.o.[c]", self.compiler))
            nmpath = path.join(self.fortpy_templates, replace("fortpy.mod.[c]", self.compiler))
            move(opath, nopath)
            move(mpath, nmpath)
            #Create the version files so we can keep track of the compiled versions.
            vpaths = [nopath + ".v", nmpath + ".v"]
            for vp in vpaths:
                with open(vp, 'w') as f:
                    f.write('#<fortpy version="{}" />'.format('.'.join(map(str, tversion))))
        else:
            msg.err("Unable to generate fortpy.o and fortpy.mod.")
        
    def _run_compile(self, identifier, testid):
        """Compiles the executable that was created for the specified identifier,
        returns True if the compile was successful."""
        #Because we often run the tests for multiple compiler versions, we need
        #a copy of the execution directory that was setup for the testing.
        from fortpy.testing.compilers import replace, executor, family
        from fortpy.utility import copytree
        from os import path
        source = path.join(self.libraryroot(identifier), identifier)        
        target = replace(source + ".[c]", self.compiler)
        copytree(source, target)

        #Before we compile, we need to make sure we have the fortpy.o and fortpy.mod
        #files for the specific compiler.
        tversion = self.template_version("fortpy.f90")
        for sdfile in ["fortpy.o", "fortpy.mod"]:
            fdfile = replace(sdfile + ".[c]", self.compiler)
            ftarget = path.join(target, sdfile)
            dversion = self.get_fortpy_version(ftarget)
                
            if not path.isfile(ftarget) or dversion != tversion:
                from shutil import copy
                source = path.join(self.fortpy_templates, fdfile)
                sversion = self.get_fortpy_version(source)
                if not path.isfile(source) or sversion != tversion:
                    self._compile_fortpyf90(tversion)
                    
                msg.info("   COPY: {}".format(source))
                copy(source, ftarget)
                #If the file is a binary, we need to save a .v with version
                #information as well for the next time we want to copy it.
                pre, ext = path.splitext(ftarget)
                if ext in [".o", ".so", ".mod"]:
                    with open(ftarget + '.v', 'w') as f:
                        f.write("# <fortpy version=\"{}\" />".format('.'.join(map(str, tversion))))
        
        #Find the target folder that has the executables etc then run
        #make and check the exit code.
        msg.blank()
        options = ""
        if self.debug:
            options += " DEBUG=true"
        if self.profile:
            options += " GPROF=true"

        codestr = "cd {}; make -f 'Makefile.{}' F90='{}' FAM='{}'" + options
        #If we are running in quiet mode, we don't want the compile information
        #to post to stdout; only errors should be redirected. This means we need
        #to wrap the execution in a subprocess and redirect the std* to PIPE
        from os import waitpid
        from subprocess import Popen, PIPE
        command = codestr.format(target, testid, executor(self.compiler), family(self.compiler))
        pcompile = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
        waitpid(pcompile.pid, 0)
        
        if not self.quiet:
            output = [x.decode('utf8') for x in pcompile.stdout.readlines()]
            msg.std(''.join(output))
        #else: #We don't need to get these lines since we are purposefully redirecting them.
        error = [x.decode('utf8') for x in pcompile.stderr.readlines()]
        code = len(error)
        if code != 0:
            msg.err(''.join(error))

        #It turns out that the compiler still returns a code of zero, even if the compile
        #failed because the actual compiler didn't fail; it did its job properly. We need to
        #check for the existence of errors in the 'compile.log' file.
        lcount = 0
        errors = []
        log = path.join(target, "compile.log")
        with open(log) as f:
            for line in f:
                lcount += 1
                if lcount > 21 and lcount < 32:
                    errors.append(line)
                elif lcount > 21:
                    break

        if len(errors) > 0:
            #There are 21 lines in the compile.log file when everything runs correctly
            #Overwrite code with a bad exit value since we have some other problems.
            code = 1
            #We want to write the first couple of errors to the console and give them the
            #option to still execute if the compile only generated warnings.
            msg.warn("compile generated some errors or warnings:")
            msg.blank()
            msg.info(''.join(errors))

            #If the executable exists, we could still prompt them to run it (in case the
            #additional lines were just warnings).
            exe = path.join(target, "{}.x".format(testid))
            if path.isfile(exe):
                choice = input("\nWould you still like to run the executable? ").lower()
                code = 0 if "y" in choice else code
                if "n" in choice:
                    msg.err("Unit testing terminated by user.")
                    exit(0)
            else:
                msg.err("Could not compile executable {}.x".format(testid))
                exit(-1)

        return code == 0, target

    def _run_exec(self, identifier, testid, result):
        """Runs the executable for unit test for the specified identifier
        for each of the outcomes specified in the doctags."""
        if not self.tests(identifier)[testid].execute:
            #We don't want to carry on with this execution at all. User-specified
            #override.
            return

        #Get the home path of the executable. A sub-folder for tests
        #needs to be created. For tests that have input and output files
        #a home/tests/testid.case folder gets created and the source files
        #get copied.

        #Create the folder for staging the tests.
        tests = path.join(result.target, "tests")
        if not path.exists(tests):
            mkdir(tests)
        
        #Now we need to determine which tests to run from the outcomes and folder tags.
        kmodule, kmethod = identifier.lower().split(".")
        module = self.parser.modules[kmodule]
        method = module.executables[kmethod]

        #Get the absolute path to the executable that we created
        exepath = path.join(result.target, "{}.x".format(testid))

        #Since we have already split out all the tests that need to be run and 
        #we have a 'testid' for the current test to run, just run that test.
        self._run_folder(self.tests(identifier)[testid], tests, result, exepath,
                         self.writer(identifier))

        if not self.tests(identifier)[testid].runchecks:
            return
        
        #Now that we have run all of the executables, we can analyze their
        #output to see if it matches.
        for case in result.cases:
            xres = result.cases[case]
            xres.test(case, result)

    def _run_folder(self, testspec, testsfolder, result, exepath, testwriter):
        """Runs the executable for the sources in the folder doctag.

        :arg testspec: a TestSpecification instance for this unit test.
        :arg testsfolder: the path to the unit test's unique staging folder.
        :arg result: a TestResult instance that execution results can be
          added to as they complete.
        :arg exepath: the full path to the executable to run in the folder.
        :arg testwriter: the MethodWriter instance that generated the test
          that will be run.
        """
        #We can use the same outcome tester afterwards for all of the cases.
        tester = OutcomeTester(testspec, self._codefolder, self.comparer, 
                               self.parser.verbose)

        #The execution can either be case-based or once-off.
        if testspec.cases is not None:
            #We need to run the executable multiple times, once for each case
            #Each case has input files specified relative to the code folder.
            for case in testspec.cases:
                caseid = "{}.{}".format(testspec.identifier, case)
                if not caseid in result.cases:
                    #Make a separate directory for the case and copy all its inputs.
                    casepath = path.join(testsfolder, caseid)
                    self._execute_testpath(testspec, testwriter, casepath, exepath, 
                                           result, tester, caseid, case)
                else:
                    result.warnings.append("Duplicate CASES specified for unit testing:" + 
                                           " {}".format(caseid))
        else:
            #Create a folder for this test specification to run in.
            testpath = path.join(testsfolder, testspec.identifier)
            self._execute_testpath(testspec, testwriter, testpath, exepath, result, 
                                   tester, testspec.identifier)

    def _execute_testpath(self, testspec, testwriter, testpath, exepath, result, 
                          tester, caseid, case=""):
        """Executes the unit test in the specified testing folder for 'case'."""
        if not path.exists(testpath):
            mkdir(testpath)

        #Copy across all the input files we need to run.
        for i in testspec.inputs:
            i.copy(self._codefolder, testpath, case, self.compiler)
        #Also copy any assignment file dependencies.
        testwriter.copy(self._codefolder, testpath, case, testspec.identifier, self.compiler)
        #Clean the testing folder to remove any target variable output files
        #from any earlier test runs.
        testspec.clean(testpath)
        testwriter.setup(testpath)

        #If the testspec needs auto-class support, write the case to file.
        if testspec.autoclass:
            with open(path.join(testpath, "fpy_case"), 'w') as f:
                f.write('"{}"'.format(case))
        
        #Save the path to the folder for execution in the result.
        result.paths[caseid] = testpath

        msg.okay("Executing {}.x in folder ./tests{}".format(testspec.identifier, 
                                                             testpath.split("tests")[1]))
        start_time = clock()                              
        from os import waitpid
        from subprocess import Popen, PIPE
        command = "cd {}; {}".format(testpath, exepath)
        prun = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
        waitpid(prun.pid, 0)        
        if not self.quiet:
            output = prun.stdout.readlines()
            if len(output) > 0:
                msg.std(''.join(output))
        #else: #We don't need to get these lines since we are purposefully redirecting them.
        error = prun.stderr.readlines()
        if len(error) > 0:
            if self.quiet:
                msg.info("With Executable at {}".format(exepath), 1)
            msg.err('\n  '+'  '.join(error))
        code = len(error)
        
        if case == "":
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester)
        else:
            result.cases[caseid] = ExecutionResult(testpath, code, clock() - start_time, tester, case)
        self._write_success(testpath, code)

        #See if we need to run the post-execution profiling
        if self.profile:
            profiling.profile(testpath, testspec.testgroup.method_fullname, 
                              exepath, self.compiler)
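A hedged usage sketch for the UnitTester class above, based only on the methods it exposes. The folder paths are placeholders, and the compiler name must match an entry in fortpy's compilers.xml.

tester = UnitTester(libraryroot="/tmp/fortpy-staging", verbose=True,
                    compiler="gfortran", debug=False, profile=False)
tester.writeall("/path/to/fortran/code")   # parse the sources and write the test executables
results = tester.runall("gfortran")        # compile and run each new or modified test
if results is not None:
    for composite_id, result in results.items():
        print(composite_id, "compiled:", result.compiled)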
Example #4
class UnitTester(object):
    """Performs automatic unit testing of individual subroutines and
    functions based on XML doctags found in the code files.

    :arg libraryroot: the path to the folder in which to stage the tests.
    """
    def __init__(self,
                 libraryroot=None,
                 verbose=False,
                 compare_templates=None,
                 fortpy_templates=None,
                 rerun=None,
                 compiler=None,
                 debug=False,
                 profile=False,
                 quiet=False,
                 strict=False):
        self.parser = CodeParser()
        self.parser.verbose = verbose
        from fortpy.utility import set_fortpy_templates
        set_fortpy_templates(self, fortpy_templates)
        self.tgenerator = TestGenerator(self.parser, libraryroot,
                                        self.fortpy_templates, self, rerun)
        self.compiler = None
        """The full path to the compiler to use for the unit tests.
        """
        self.quiet = quiet
        """Specifies whether the tester should run in quiet mode, which only prints
        essential information for the unit tests to stdout.
        """
        self.strict = strict
        """Specifies whether to compile files with *all* warnings enabled."""
        self.debug = debug == True
        self.set_compiler(compiler)
        self.profile = self._profiler_exists(profile)

        #A flag to track whether the generator has already written
        #the executables.
        self._written = False
        #The user's raw value for the compare_templates directory
        self._compare_templates = compare_templates

    def get_fortpy_version(self, fortpath):
        """Gets the fortpy version number from the first line of the specified file."""
        from fortpy.testing.compilers import get_fortpy_version
        return get_fortpy_version(self.compiler, fortpath)

    def tests(self, identifier):
        """Returns a dictionary of all the tests that need to be run for the
        specified module.executable identifier."""
        if identifier in self.tgenerator.xtests:
            return self.tgenerator.xtests[identifier]
        else:
            return {}

    def writer(self, identifier):
        """Returns the underlying executable writer that has a list of ordered
        method/assignment dependencies."""
        if identifier in self.tgenerator.xwriters:
            return self.tgenerator.xwriters[identifier]

    def libraryroot(self, identifier):
        """Returns the absolute path to the staging directory for the unit tests with
        the specified testid.
        """
        if identifier in self.tgenerator.xgenerator.folders:
            return self.tgenerator.xgenerator.folders[identifier]

    def _profiler_exists(self, profile):
        """Tests whether we have gprof available to do the profiling of the methods
        to unit test.
        
        :arg profile: the value specified in the UnitTester constructor.
        """
        if profile == True:
            gprof = self.which("gprof")
            if gprof is None:
                msg.err("gprof is required to run profiling with fortpy.")
                exit(1)
            else:
                return True
        else:
            return False

    def set_compiler(self, compiler):
        """Sets the compiler to use for the unit testing of this code parser.
        """
        if compiler is not None:
            self.compiler = compiler
            self._compiler_exists()

    def _compiler_exists(self):
        """Tests whether the specified compiler is available on the machine. If
        it isn't, give an error and exit."""
        from fortpy.testing.compilers import compilers
        if self.compiler in compilers:
            #Overwrite the *name* of the compiler with its full path; since
            #fortpy assumes that self.compiler is the name of a valid executable
            #this will still work correctly.
            compiler = compilers[self.compiler].path
        else:
            compiler = self.compiler

        if self.which(compiler) is None:
            msg.err("compiler {} not found. Exiting.".format(self.compiler))
            exit(1)

    def which(self, program):
        """Tests whether the specified program is anywhere in the environment
        PATH so that it probably exists."""
        import os

        def is_exe(fpath):
            return os.path.isfile(fpath) and os.access(fpath, os.X_OK)

        fpath, fname = os.path.split(program)
        if fpath:
            if is_exe(program):
                return program
        else:
            for path in os.environ["PATH"].split(os.pathsep):
                path = path.strip('"')
                exe_file = os.path.join(path, program)
                if is_exe(exe_file):
                    return exe_file

        return None

    def writeall(self, codefolder):
        """Writes all the unit test executables that are new or modified
        as well as the makefiles for all subroutines in all modules."""
        #The test generator already loops over all modules in the code
        #parser and does all the heavy-lifting. We need to pre-load any
        #modules that we are interested in testing. Only those loaded
        #when write() is first called will have their tests processed.
        self._codefolder = path.abspath(codefolder)
        if self._compare_templates is not None:
            self.compare_templates = path.abspath(self._compare_templates)
        else:
            self.compare_templates = path.join(self._codefolder, "templates/")

        #We will load all the modules in the code folder specified and
        #then run the test generator.
        files = {}
        self.parser.scan_path(self._codefolder, files)
        for f in files:
            filepath = files[f]
            self.parser.parse(filepath, True, True)

        #Now that we have loaded all the codefiles in the path, we can
        #generate the unittest executables
        self.tgenerator.write(self._codefolder)
        self._written = True

    def runall(self, compiler=None):
        """Compiles and runs each new or modified unit test executable.
        After the run is complete, the outcomes are checked for consistency.

        :arg compiler: the name of the compiler in 'compilers.xml' to use.
        """
        #We will likely need a file comparer for the outcomes testing
        self.comparer = FileComparer(self.fortpy_templates,
                                     self.compare_templates)

        if self._written:
            self.set_compiler(compiler)

            from fortpy.testing.compilers import replace
            from fortpy.testing.auxiliary import generate
            from fortpy.utility import copy

            #Run them each individually and return a dictionary of all the
            #test results
            result = {}
            fpyauxs = []
            for composite_id in self.tgenerator.tests_to_run:
                identifier, testid = composite_id.split("|")
                #Compile and copy fpy_auxiliary if it isn't in the identifiers directory yet.
                source = path.join(self.libraryroot(identifier), identifier)
                target = replace(source + ".[c]", self.compiler)
                if self.writer(
                        identifier).autoclass and identifier not in fpyauxs:
                    code, success, fpytarget = generate(
                        self.parser, self._codefolder,
                        self.libraryroot(identifier), self.compiler,
                        self.debug, self.profile, self.strict)
                    opath = path.join(fpytarget, "fpy_auxiliary.o")
                    mpath = path.join(fpytarget, "fpy_auxiliary.mod")
                    copy(opath, target)
                    copy(mpath, target)
                    fpyauxs.append(identifier)

                oneresult = self._run_single(identifier, testid, source)
                if oneresult is not None:
                    result[composite_id] = oneresult

            return result
        else:
            msg.warn(
                "you can't run tests until the executables have been written. Exiting."
            )
            return None

    def _run_single(self, identifier, testid, staging):
        """Runs all unit test cases for the specified identifier."""
        #Just initialize a result object and populate its properties
        #using the various _run_* methods.
        result = TestResult(identifier, testid)
        result.compiled, result.target = self._run_compile(
            identifier, testid, staging)
        if result.compiled:
            self._run_exec(identifier, testid, result)

        if self.tests(identifier)[testid].runchecks:
            #Only return a test result if the checks were actually run.
            return result

    def _run_compile(self, identifier, testid, staging):
        """Compiles the executable that was created for the specified identifier,
        returns True if the compile was successful."""
        #We need to make sure that the fpy_auxiliary .mod and .o files are available
        #if the test uses auto-class functionality.
        from os import path
        from fortpy.testing.compilers import compile_general
        msg.blank()
        code, success, target = compile_general(staging,
                                                self.compiler,
                                                testid,
                                                self.debug,
                                                self.profile,
                                                self.quiet,
                                                strict=self.strict)

        if not success:
            #There are 21 lines in the compile.log file when everything runs correctly
            #Overwrite code with a bad exit value since we have some other problems.
            code = 1

            #If the executable exists, we could still prompt them to run it (in case the
            #additional lines were just warnings).
            exe = path.join(target, "{}.x".format(testid))
            if path.isfile(exe):
                choice = raw_input(
                    "\nWould you still like to run the executable? ").lower()
                code = 0 if "y" in choice else code
                if "n" in choice:
                    msg.err("Unit testing terminated by user.")
                    exit(0)
            else:
                msg.err("Could not compile executable {}.x".format(testid))
                exit(-1)

        return code == 0, target

    def _run_exec(self, identifier, testid, result):
        """Runs the executable for unit test for the specified identifier
        for each of the outcomes specified in the doctags."""
        if not self.tests(identifier)[testid].execute:
            #We don't want to carry on with this execution at all. User-specified
            #override.
            return

        #Get the home path of the executable. A sub-folder for tests
        #needs to be created. For tests that have input and output files
        #a home/tests/testid.case folder gets created and the source files
        #get copied.

        #Create the folder for staging the tests.
        tests = path.join(result.target, "tests")
        if not path.exists(tests):
            mkdir(tests)

        #Now we need to determine which tests to run from the outcomes and folder tags.
        kmodule, kmethod = identifier.lower().split(".")
        module = self.parser.modules[kmodule]
        method = module.executables[kmethod]

        #Get the absolute path to the executable that we created
        exepath = path.join(result.target, "{}.x".format(testid))

        #Since we have already split out all the tests that need to be run and
        #we have a 'testid' for the current test to run, just run that test.
        self._run_folder(
            self.tests(identifier)[testid], tests, result, exepath,
            self.writer(identifier))

        if not self.tests(identifier)[testid].runchecks:
            return

        #Now that we have run all of the executables, we can analyze their
        #output to see if it matches.
        for case in result.cases:
            xres = result.cases[case]
            xres.test(case, result)

    def _run_folder(self, testspec, testsfolder, result, exepath, testwriter):
        """Runs the executable for the sources in the folder doctag.

        :arg testspec: a TestSpecification instance for this unit test.
        :arg testsfolder: the path to the unit test's unique staging folder.
        :arg result: a TestResult instance that execution results can be
          added to as they complete.
        :arg exepath: the full path to the executable to run in the folder.
        :arg testwriter: the MethodWriter instance that generated the test
          that will be run.
        """
        #We can use the same outcome tester afterwards for all of the cases.
        tester = OutcomeTester(testspec, self._codefolder, self.comparer,
                               self.parser.verbose)

        #The execution can either be case-based or once-off.
        if testspec.cases is not None:
            #We need to run the executable multiple times, once for each case
            #Each case has input files specified relative to the code folder.
            for case in testspec.cases:
                caseid = "{}.{}".format(testspec.identifier, case)
                if not caseid in result.cases:
                    #Make a separate directory for the case and copy all its inputs.
                    casepath = path.join(testsfolder, caseid)
                    self._execute_testpath(testspec, testwriter, casepath,
                                           exepath, result, tester, caseid,
                                           case)
                else:
                    result.warnings.append(
                        "Duplicate CASES specified for unit testing:" +
                        " {}".format(caseid))
        else:
            #Create a folder for this test specification to run in.
            testpath = path.join(testsfolder, testspec.identifier)
            self._execute_testpath(testspec, testwriter, testpath, exepath,
                                   result, tester, testspec.identifier)

    def _execute_testpath(self,
                          testspec,
                          testwriter,
                          testpath,
                          exepath,
                          result,
                          tester,
                          caseid,
                          case=""):
        """Executes the unit test in the specified testing folder for 'case'."""
        if not path.exists(testpath):
            mkdir(testpath)

        #Copy across all the input files we need to run.
        for i in testspec.inputs:
            i.copy(self._codefolder, testpath, case, self.compiler)
        #Also copy any assignment file dependencies.
        testwriter.copy(self._codefolder, testpath, case, testspec.identifier,
                        self.compiler)
        #Clean the testing folder to remove any target variable output files
        #from any earlier test runs.
        testspec.clean(testpath)
        testwriter.setup(testspec.identifier, testpath)

        #If the testspec needs auto-class support, write the case to file.
        if testspec.autoclass:
            with open(path.join(testpath, "fpy_case"), 'w') as f:
                f.write('"{}"'.format(case))

        #Save the path to the folder for execution in the result.
        result.paths[caseid] = testpath

        msg.okay("Executing {}.x in folder ./tests{}".format(
            testspec.identifier,
            testpath.split("tests")[1]))
        start_time = clock()
        from os import waitpid
        from subprocess import Popen, PIPE
        command = "cd {}; {}".format(testpath, exepath)
        prun = Popen(command,
                     shell=True,
                     executable="/bin/bash",
                     stdout=PIPE,
                     stderr=PIPE)
        waitpid(prun.pid, 0)
        if not self.quiet:
            output = prun.stdout.readlines()
            if len(output) > 0:
                msg.std(''.join(output))
        #else: #We don't need to get these lines since we are purposefully redirecting them.
        error = prun.stderr.readlines()
        if len(error) > 0:
            if self.quiet:
                msg.info("With Executable at {}".format(exepath), 1)
            msg.err('\n  ' + '  '.join(error))
        code = len(error)

        if case == "":
            result.cases[caseid] = ExecutionResult(testpath, code,
                                                   clock() - start_time,
                                                   tester)
        else:
            result.cases[caseid] = ExecutionResult(testpath, code,
                                                   clock() - start_time,
                                                   tester, case)
        self._write_success(testpath, code)

        #See if we need to run the post-execution profiling
        if self.profile:
            profiling.profile(testpath, testspec.testgroup.method_fullname,
                              exepath, self.compiler)
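A self-contained sketch of the version-tag convention that both get_fortpy_version implementations rely on: the first line of fortpy.f90 (or of a companion .v file) carries a <fortpy version="x.y.z" /> tag that is parsed into a list of integers. The helper name parse_version_line is illustrative, not part of fortpy.

import xml.etree.ElementTree as ET

def parse_version_line(line):
    """Return e.g. [1, 7, 2] for '#<fortpy version="1.7.2" />', else []."""
    try:
        lt = line.index("<")
    except ValueError:
        return []
    vxml = "<doc>{}</doc>".format(line[lt:])
    children = list(ET.XML(vxml))
    if children and "version" in children[0].attrib:
        return list(map(int, children[0].attrib["version"].split(".")))
    return []

print(parse_version_line('#<fortpy version="1.7.2" />'))  # [1, 7, 2]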
Example #5
                           case=""):
        """Cleans up and sets results for the test that ran.
        """
        if case == "":
            result.cases[caseid] = ExecutionResult(testpath, code,
                                                   clock() - start_time,
                                                   tester)
        else:
            result.cases[caseid] = ExecutionResult(testpath, code,
                                                   clock() - start_time,
                                                   tester, case)
        self._write_success(testpath, code)

        #See if we need to run the post-execution profiling
        if self.profile:
            profiling.profile(testpath, testspec.testgroup.method_fullname,
                              exepath, self.compiler)

    def _write_success(self, testpath, code):
        """Creates a SUCCESS file in the specified testpath if code==0 that has
        the time of the last execution. If code != 0, any existing SUCCESS file
        is deleted.
        """
        sucpath = path.join(testpath, "SUCCESS")
        if code == 0:
            with open(sucpath, 'w') as f:
                f.write(time.strftime("%c"))
        else:
            if path.isfile(sucpath):
                remove(sucpath)
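A small companion sketch (not part of fortpy) showing how a later run could inspect the SUCCESS marker that _write_success maintains: the file exists only when the previous execution returned a zero exit code, and it holds the local time of that run.

from os import path

def last_success(testpath):
    """Return the recorded timestamp string, or None if the last run failed."""
    sucpath = path.join(testpath, "SUCCESS")
    if path.isfile(sucpath):
        with open(sucpath) as f:
            return f.read().strip()
    return None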