Example #1
    def test_2_writepy(self):
        """Tests the writing of the python module that interacts with the fortran
        shared library via ctypes.
        """
        for modname in self.originals:
            msg.info("Writing python wrapper module {}.py".format(modname))
            self.writers[modname].write_py()
            msg.okay("Finished writing {}.py".format(modname))
Example #2
def check_pointers(parser, codedir=None, mfilter=None, recursive=False):
    """Checks the modules in the specified code parser to see if they
    have common, but subtle, pointer bugs in:

    1. subroutines with a parameter of intent(out) and user-derived type
      *must* set *all* members of that parameter or they will have an
      *undefined* status.
    2. pointer-type arrays that are not nullified or set to a valid target
      will return 'T' when passed to `associated`. Best practice is to nullify
      pointer arrays in user-derived types as the default value on those types.
    
    :arg parser: [fortpy.code.CodeParser] with the modules to search *already loaded*.
    :arg codedir: specify the full path to the library whose modules should be searched,
      just another way to filter which modules are generating the warnings.
    :arg mfilter: filter to apply to module names; can use the wildcard standard
      from bash.
    :arg recursive: when True, the 'codedir' filter is ignored and all loaded
      modules are checked.
    """
    from fnmatch import fnmatch
    from fortpy.msg import std, set_verbosity, info

    set_verbosity(0)
    W1 = "   {} '{}' does not set the value of members '{}' in parameter '{}'."
    W2 = "   Type '{}' does not nullify members '{}' on creation."

    offenders = {}
    for (modname, module) in parser.modules.items():
        if not recursive and codedir is not None and not codedir.lower() in module.filepath.lower():
            continue
        if mfilter is not None and not fnmatch(module.name.lower(), mfilter.lower()):
            continue

        #Test the first condition above for all subroutines in the module; also handle
        #the recursively defined subroutines.
        hprinted = False
        for xname, xvalue in module.executables.items():
            oparams, pmembers = _exec_check_pointers(xvalue)
            if len(oparams) > 0:
                if not hprinted:
                    info("Best practice suggestions: {}".format(module.filepath))
                    hprinted = True
                    
                for oparam in oparams:
                    plist = ', '.join([p.name for p in pmembers[oparam]])                
                    std(W1.format(type(xvalue).__name__, xname, plist, oparam), 0)
                offenders[xvalue.full_name] = (oparams, pmembers)

        for tname, tvalue in module.types.items():
            result = _type_check_pointers(tvalue)
            if len(result) > 0:
                if not hprinted:
                    info("Best practice suggestions: {}".format(module.filepath))
                    hprinted = True

                plist = ', '.join([p.name for p in result])
                std(W2.format(tname, plist), 0)
                #Key on the module and type name; 'xvalue' from the executables loop
                #above would refer to the last executable, not this type.
                offenders["{}.{}".format(modname, tname)] = result

    return offenders
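
A possible way to drive this checker from a script is sketched below; the CodeParser construction, its parse() call, and the paths are assumptions about how modules get loaded, not confirmed fortpy usage.

#Sketch only: load a module into a parser, then run the pointer checks.
from fortpy.code import CodeParser

parser = CodeParser()
parser.parse("/path/to/codes/celib/symmetry_module.f90")  #hypothetical source file

offenders = check_pointers(parser, mfilter="symmetry*")
for fullname, details in offenders.items():
    print(fullname, details)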
Example #3
    def test_0_writef90(self):
        """Tests the writing of the wrapper F90 module as well as the compilation
        of the *.a library for the dependency modules required by symmetry_module.f90
        """
        for modname, module in self.parser.modules.items():
            msg.info("Writing wrapper module for {}".format(modname))
            fwriter = f90.WrapperModule(module, "celib", self.fdir, link="/Users/trunks/codes/celib/trunk/celib.a")
            self.writers[modname] = fwriter
            fwriter.write_f90()
            msg.okay("Finished writing {}_c.f90".format(modname))
Example #4
def _compile_simple(compiler, modnames, folder):
    """Compiles the specified list of modules (in specified order) to produce a .mod and a .o
    for each of them.
    """
    for modname in modnames:
        msg.info("Compiling {0}.mod and {0}.o for {1}".format(modname, compiler))
    
    from os import waitpid, path
    from subprocess import Popen, PIPE
    codefiles = ' '.join(["{}.f90".format(m) for m in modnames])
    command = "cd {0}; {1} {2}; {1} -c {2}".format(folder, executor(compiler), codefiles)
    pcompile = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
    waitpid(pcompile.pid, 0)
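
For reference, the command string assembled above runs the compiler twice over the same file list, once plainly and once with -c. A small sketch with a hypothetical folder, compiler, and module names ("gfortran" stands in for executor(compiler)):

#Hypothetical values; shows how the shell command above expands.
modnames = ["fortpy", "fpy_auxiliary"]
codefiles = ' '.join(["{}.f90".format(m) for m in modnames])
print("cd {0}; {1} {2}; {1} -c {2}".format("/tmp/build", "gfortran", codefiles))
#-> cd /tmp/build; gfortran fortpy.f90 fpy_auxiliary.f90; gfortran -c fortpy.f90 fpy_auxiliary.f90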
Example #5
    def _compile(self, dirpath, makename, compiler, debug, profile):
        """Compiles the makefile at the specified location with 'compiler'.

        :arg dirpath: the full path to the directory where the makefile lives.
        :arg compiler: one of ['ifort', 'gfortran'].
        :arg makename: the name of the make file to compile.
        """
        from os import path
        options = ""
        if debug:
            options += " DEBUG=true"
        if profile:
            options += " GPROF=true"

        from os import system
        codestr = "cd {}; make -f '{}' F90={} FAM={}" + options
        code = system(codestr.format(dirpath, makename, compiler, compiler[0]))

        #It turns out that we still get an exit code of zero even if the compile
        #failed, because make itself didn't fail; it did its job properly. We need to
        #check for the existence of errors in the 'compile.log' file.
        lcount = 0
        errors = []
        log = path.join(dirpath, "compile.log")
        with open(log) as f:
            for line in f:
                lcount += 1
                if lcount > 21 and lcount < 32:
                    errors.append(line)
                elif lcount > 21:
                    break

        if len(errors) > 0:
            #There are 21 lines in the compile.log file when everything runs correctly
            #Overwrite code with a bad exit value since we have some other problems.
            code = 1
            #We want to write the first couple of errors to the console and give them the
            #option to still execute if the compile only generated warnings.
            msg.warn("compile generated some errors or warnings:")
            msg.blank()
            msg.info(''.join(errors))

        return code
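
With hypothetical arguments, the make invocation built above looks as follows; note that FAM is derived from the first letter of the compiler name:

#Hypothetical arguments illustrating the assembled command.
dirpath, makename, compiler = "/tmp/tests", "Makefile.ifort", "ifort"
options = " DEBUG=true"
codestr = "cd {}; make -f '{}' F90={} FAM={}" + options
print(codestr.format(dirpath, makename, compiler, compiler[0]))
#-> cd /tmp/tests; make -f 'Makefile.ifort' F90=ifort FAM=i DEBUG=true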
Example #6
def _ensure_fileversion(compiler, modname, folder, target, trycompile=True):
    """Makes sure that the module's f90, mod and o files are up to date with the template
    version. If they aren't compile and copy them.

    :arg compiler: the compiler key from compilers.xml
    :arg modname: the name of the module to check file versions for and move (e.g. "fortpy").
    :arg folder: the folder that contains the up-to-date, "template" version of the module.
    :arg target: the folder to copy the compiled files to.
    :arg trycompile: if the codefile has not been compiled yet, or if the version is out of 
      date, should the code try a simple compile?
    """
    from os import path
    codefile = "{}.f90".format(modname)
    compfiles = ["{}.{}".format(modname, ext) for ext in ["o", "mod"]]
    
    tversion = template_version(compiler, codefile)
    for sdfile in compfiles:
        fdfile = replace(sdfile + ".[c]", compiler)
        ftarget = path.join(target, sdfile)
        dversion = get_fortpy_version(compiler, ftarget)

        if not path.isfile(ftarget) or dversion != tversion:
            source = path.join(folder, fdfile)
            sversion = get_fortpy_version(compiler, source)
            if trycompile and (not path.isfile(source) or sversion != tversion):
                _compile_simple(compiler, [modname], folder)
                _vupdate_compiled_module(compiler, modname, folder, tversion)
            elif not path.isfile(source):
                msg.warn("{} does not exist.".format(source))
                continue
            elif sversion != tversion:
                msg.warn("{} has an old version number.".format(source))

            from fortpy.utility import copyfile
            msg.info("   COPY: {}".format(source))
            copyfile(source, ftarget)
            #If the file is a binary, we need to save a .v with version
            #information as well for the next time we want to copy it.
            pre, ext = path.splitext(ftarget)
            if ext in [".o", ".so", ".mod"]:
                with open(ftarget + '.v', 'w') as f:
                    f.write("# <fortpy version=\"{}\" />".format('.'.join(map(str, tversion))))
Example #7
def _compile_simple(compiler, modnames, folder):
    """Compiles the specified list of modules (in specified order) to produce a .mod and a .o
    for each of them.
    """
    for modname in modnames:
        msg.info("Compiling {0}.mod and {0}.o for {1}".format(
            modname, compiler))

    from os import waitpid, path
    from subprocess import Popen, PIPE
    codefiles = ' '.join(["{}.f90".format(m) for m in modnames])
    command = "cd {0}; {1} {2}; {1} -c {2}".format(folder, executor(compiler),
                                                   codefiles)
    pcompile = Popen(command,
                     shell=True,
                     executable="/bin/bash",
                     stdout=PIPE,
                     stderr=PIPE,
                     close_fds=True)
    pcompile.stdout.close()
    pcompile.stderr.close()
    waitpid(pcompile.pid, 0)
Example #8
    def _find_executables(self):
        """Finds the list of executables that pass the requirements necessary to have
        a wrapper created for them.
        """
        if len(self.needs) > 0:
            return

        for execname, executable in list(self.module.executables.items()):
            skip = False
            #At the moment we can't handle executables that use special derived types.
            if execname not in self.module.publics or not executable.primitive:
                msg.info("Skipping {}.{} because it is not public or uses derived types "
                         "we can't handle yet.".format(self.module.name, execname))
                skip = True
            #Check that all the parameters have intent specified; otherwise we won't handle them well.
            if any([p.direction == "" for p in executable.ordered_parameters]):
                msg.warn("Some parameters in {}.{} have no intent".format(self.module.name, execname) +
                         " specified. Can't wrap that executable.")
                skip = True

            if not skip:
                self.uses.append(execname)
                for depmod in executable.search_dependencies():
                    if depmod not in self.needs:
                        self.needs.append(depmod)
Example #9
def _prompt_model(wizard, parameter, target):
    """Prompts the user for details about the model output for comparison.
    """
    #This is essentially the prompt to setup the <output> tags. Look for the existing
    #<output> in the test specification.
    if target.compareto is None or target.compareto not in wizard.tauto.outputs:
        from xml.etree.ElementTree import Element
        output = Element("output", {"identifier": "{}.auto".format(target.name)})
        newoutput = True
    else:
        output = wizard.tauto.outputs[target.compareto].xml
        newoutput = False

    skeys = ["Automate.", "Set a constant value.", "Don't compare to model output."]
    choice = _prompt_general("Choose the source of the model output:", skeys)
    attribs = {
        "tolerance": {"leader": "Enter % accuracy required for comparisons: 0.0 to 1.0 (default)?",
                      "cast": float}
    }
    #Keep track of whether we actually need to set model output for this parameter.
    skip = choice == 2
    
    if choice == 0:
        #The biggest problem we have here is that we need to specify the file to use
        #for the model output. We could have the user search for one, but really the
        #way this works is that we need to compile the test, run it without checks and
        #then present the user with the output to verify for each test case. Since the
        #automator presents the input parameters first and then allows the targets to
        #be established without model outputs first, we should be able to compile and
        #run the test (if it hasn't been done already).
        rdict = {
            1: _examine_output,
            2: _print_outpath,
            3: _start_debug,
            4: _set_correct,
            5: _set_existing
        }
        rkeys = ["Re-run the tests to re-create output(s).",
                 "Examine the variables' output(s) for correctness.",
                 "Print the location of the model output(s).",
                 "Start the debugger for the unit test program.",
                 "Set the variable output(s) as correct.",
                 "Specify an existing file as model output.",
                 "Exit the correction loop."]
        varfile = None
        correct = True
        runonce = False

        while correct:
            msg.blank()
            if has_outputs(wizard, True):
                rchoice = _prompt_general("The model output for the active test case exists.\n"
                                          "What would you like to do?", rkeys)
                if rchoice in rdict:
                    varfile = rdict[rchoice](wizard, parameter, target)
                    if rchoice == 4:
                        correct = False
                elif rchoice == 0:
                    run(wizard, True, True)
                #rkeys index 6 is "Exit the correction loop."
                elif rchoice == 6:
                    correct = False
            else:
                #First run the tests to generate the output, then present it to the user
                #so that they can check it is right.
                if not runonce:
                    msg.info("Running the unit test to generate output for examination.")
                    run(wizard, False, True)
                    runonce = True
                else:
                    msg.warn("the model outputs weren't generated by running the unit test.\n"
                             "Check error messages.")
                    correct = False
            
        if varfile is not None:
            output.set("file", varfile)
                
        if "autoclass" in target.xml.attrib and target.xml.attrib["autoclass"] == "true":
            output.set("autoclass", "true")
            if "tolerance" in selattrs:
                output.set("actolerance", selattrs["tolerance"])
    elif choice == 1:
        attribs["value"] = {"leader": "Enter the correct value; can be any valid python code."}

    if skip:
        #We need to remove an output if one already exists; otherwise do nothing.
        if target.compareto is not None:
            if target.compareto in wizard.tauto.outputs:
                del wizard.tauto.outputs[target.compareto]
            target.compareto = None
    else:
        #Prompts for the last, sundry attributes on the <output> tag.
        selattrs = _prompt_attributes("output", attribs)
        for k, v in selattrs.items():
            output.set(k, v)
            
        if newoutput:
            target.compareto = output.attrib["identifier"]
            wizard.tauto.outputs[target.compareto] = TestOutput(output)
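
As a rough illustration of what the wizard assembles, the <output> element above serializes to a single tag; the identifier and attribute values here are hypothetical:

from xml.etree.ElementTree import Element, tostring

output = Element("output", {"identifier": "standard.auto"})  #hypothetical target name
output.set("file", "standard.out")                           #hypothetical model output file
output.set("tolerance", "0.99")
print(tostring(output).decode())
#-> roughly: <output identifier="standard.auto" file="standard.out" tolerance="0.99" />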
Example #10
def compile(folder, compiler=None, identifier=None, debug=False, profile=False,
            quiet=False, moptions=None, inclfortpy=True, vupdates=None,
            strict=False, inclfpyaux=False):
    """Runs the makefile in the specified folder to compile the 'all' rule.

    :arg vupdates: a list of module names for which the output .mod and .o files
      should have version information attached.
    """
    if inclfortpy:
        #Before we can compile the library, we need to make sure that we have a fortpy
        #.mod and .o compiled with the *same* compiler version specified.
        from fortpy.utility import get_fortpy_templates_dir
        _ensure_fileversion(compiler, "fortpy", get_fortpy_templates_dir(), folder)
        
    options = ""
    if debug:
        options += " DEBUG=true"
    if profile:
        options += " GPROF=true"
    if strict:
        options += " STRICT=true"    

    if moptions is not None:
        for opt in moptions:
            options += " {}".format(opt)
        
    if identifier is not None:
        codestr = "cd {}; make -f 'Makefile.{}' F90='{}' FAM='{}'" + options
        command = codestr.format(folder, identifier, executor(compiler), family(compiler))
    else:
        codestr = "cd {}; make F90='{}' FAM='{}'" + options
        command = codestr.format(folder, executor(compiler), family(compiler))
        
    #If we are running in quiet mode, we don't want the compile information
    #to post to stdout; only errors should be surfaced. This means we need
    #to wrap the execution in a subprocess and redirect the std* to PIPE.
    from os import waitpid, path
    from subprocess import Popen, PIPE
    pcompile = Popen(command, shell=True, executable="/bin/bash", stdout=PIPE, stderr=PIPE)
    waitpid(pcompile.pid, 0)
    
    if not quiet:
        output = [x.decode('utf8') for x in pcompile.stdout.readlines()]
        msg.std(''.join(output))
    #else: #We don't need to get these lines since we are purposefully redirecting them.
    error = [x.decode('utf8') for x in pcompile.stderr.readlines()]
    code = len(error)
    if code != 0:
        msg.err(''.join(error))

    #It turns out that we still get an exit code of zero even if the compile
    #failed, because make itself didn't fail; it did its job properly. We need to
    #check for the existence of errors in the 'compile.log' file.
    lcount = 0
    errors = []
    log = path.join(folder, "compile.log")
    with open(log) as f:
        for line in f:
            lcount += 1
            if lcount > 21 and lcount < 32:
                errors.append(line)
            elif lcount > 21:
                break

    if len(errors) > 0:
        #There are 21 lines in the compile.log file when everything runs correctly
        #Overwrite code with a bad exit value since we have some other problems.
        code = 1
        #We want to write the first couple of errors to the console and give them the
        #option to still execute if the compile only generated warnings.
        msg.warn("compile generated some errors or warnings:")
        msg.blank()
        msg.info(''.join(errors))

    if vupdates is not None:
        for modname in vupdates:
            _vupdate_compiled_module(compiler, modname, folder, rename=False)
        
    return (code, len(errors)==0)
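
A minimal sketch of calling this routine; the folder is a placeholder and "gfortran" must be a compiler key defined in compilers.xml:

#Hypothetical invocation; assumes the folder already contains a Makefile with an 'all' rule.
code, clean = compile("/tmp/mylib/tests", compiler="gfortran", debug=True, quiet=True)
if not clean:
    print("Compilation reported errors or warnings; exit code: {}".format(code))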
Example #11
def compile(folder,
            compiler=None,
            identifier=None,
            debug=False,
            profile=False,
            quiet=False,
            moptions=None,
            inclfortpy=True,
            vupdates=None,
            strict=False,
            inclfpyaux=False):
    """Runs the makefile in the specified folder to compile the 'all' rule.

    :arg vupdates: a list of module names for which the output .mod and .o files
      should have version information attached.
    """
    if inclfortpy:
        #Before we can compile the library, we need to make sure that we have a fortpy
        #.mod and .o compiled with the *same* compiler version specified.
        from fortpy.utility import get_fortpy_templates_dir
        _ensure_fileversion(compiler, "fortpy", get_fortpy_templates_dir(),
                            folder)

    options = ""
    if debug:
        options += " DEBUG=true"
    if profile:
        options += " GPROF=true"
    if strict:
        options += " STRICT=true"

    if moptions is not None:
        for opt in moptions:
            options += " {}".format(opt)

    if identifier is not None:
        codestr = "cd {}; make -f 'Makefile.{}' F90='{}' FAM='{}'" + options
        command = codestr.format(folder, identifier, executor(compiler),
                                 family(compiler))
    else:
        codestr = "cd {}; make F90='{}' FAM='{}'" + options
        command = codestr.format(folder, executor(compiler), family(compiler))

    #If we are running in quiet mode, we don't want the compile information
    #to post to stdout; only errors should be surfaced. This means we need
    #to wrap the execution in a subprocess and redirect the std* to PIPE.
    from os import waitpid, path
    from subprocess import Popen, PIPE
    pcompile = Popen(command,
                     shell=True,
                     executable="/bin/bash",
                     stdout=PIPE,
                     stderr=PIPE,
                     close_fds=True)
    waitpid(pcompile.pid, 0)

    if not quiet:
        output = [x.decode('utf8') for x in pcompile.stdout.readlines()]
        msg.std(''.join(output))
    #else: #We don't need to get these lines since we are purposefully redirecting them.
    error = [x.decode('utf8') for x in pcompile.stderr.readlines()]
    code = len(error)
    if code != 0:
        msg.err(''.join(error))

    pcompile.stdout.close()
    pcompile.stderr.close()
    #It turns out that we still get an exit code of zero even if the compile
    #failed, because make itself didn't fail; it did its job properly. We need to
    #check for the existence of errors in the 'compile.log' file.
    lcount = 0
    errors = []
    log = path.join(
        folder, "compile.{}.log".format(
            identifier if identifier is not None else "default"))
    with open(log) as f:
        for line in f:
            lcount += 1
            if lcount > 21 and lcount < 32:
                errors.append(line)
            elif lcount > 21:
                break

    if len(errors) > 0:
        #There are 21 lines in the compile.log file when everything runs correctly
        #Overwrite code with a bad exit value since we have some other problems.
        code = 1
        #We want to write the first couple of errors to the console and give them the
        #option to still execute if the compile only generated warnings.
        msg.warn("compile generated some errors or warnings:")
        msg.blank()
        msg.info(''.join(errors))

    if vupdates is not None:
        for modname in vupdates:
            _vupdate_compiled_module(compiler, modname, folder, rename=False)

    return (code, len(errors) == 0)