Example 1
def initialize():
    """Compare the source and target files described by the `args` dictionary
    (defined elsewhere in the script) and print or save the comparison report."""
    from fortpy.msg import set_verbosity
    set_verbosity(args["verbose"])
    
    comparer = FileComparer(template_folder=args["templates"])
    result = comparer.compare(args["source"], args["target"], args["xmltemplate"], args["mode"])

    if args["save"]:
        from os import path
        fullpath = path.abspath(path.expanduser(args["save"]))
        with open(fullpath, "w") as f:
            f.write(print_compare_result(result, args["verbose"]))
    else:
        print(print_compare_result(result, args["verbose"]))

    return result
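This function relies on a module-level `args` dictionary and on `FileComparer` and `print_compare_result` being imported elsewhere in the script (presumably from fortpy's testing modules). Below is a minimal, hypothetical sketch of how such an `args` dict could be assembled with argparse; the option names and defaults are illustrative assumptions, not the exact flags of fortpy's compare script.

# Hypothetical sketch only: build the `args` dict that initialize() reads.
# The option names below are assumptions for illustration and may not match
# the real fortpy compare script.
import argparse

parser = argparse.ArgumentParser(description="Compare a test output file to a model file.")
parser.add_argument("source", help="path to the file produced by the unit test")
parser.add_argument("target", help="path to the model (reference) output file")
parser.add_argument("-templates", default="./templates",
                    help="folder containing the XML comparison templates")
parser.add_argument("-xmltemplate", default=None,
                    help="explicit XML template file to use for the comparison")
parser.add_argument("-mode", default="default", help="comparison mode")
parser.add_argument("-save", default=None,
                    help="write the comparison report to this file instead of printing it")
parser.add_argument("-verbose", action="store_true", help="produce a detailed report")

args = vars(parser.parse_args())  # initialize() indexes this like a plain dict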
Example 2
    def _run_compare_file(self, exepath, outvar, coderoot, caseid):
        """Compares an output file from an executable with its model output using
        settings in the doctag.

        :arg exepath: the full path to the output file from the unit test.
        :arg outvar: the TestOutput instance with testing specifications.
        :arg coderoot: the full path to the folder that has all the code files.
        :arg caseid: the identifier for the specific test case being tested.
        """
        #First get the comparison results, then analyze them using the tolerances
        #etc. from the doctag.
        targetpath = outvar.abspath(coderoot, caseid)
        result = self.comparer.compare(exepath, targetpath, outvar.template, outvar.mode)

        #Write the results out to file as a record. If the result is None, create
        #a file that says so. The default file name is the output file name with
        #an extra extension of .compare.
        resultpath = exepath + ".compare"
        with open(resultpath, "w") as f:
            if result is not None:
                f.write(print_compare_result(result, self.verbose))
            else:
                f.write("The result comparison failed. Check the unit test console output.")
            
        return result
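The report always lands next to the unit-test output file, under the same name plus a .compare extension. As a usage illustration, a hypothetical helper (not part of fortpy) that retrieves the report after a run might look like this:

from os import path

def read_compare_report(exepath):
    """Hypothetical helper: return the text of the .compare report written for
    `exepath`, or None if no report has been produced yet."""
    resultpath = exepath + ".compare"
    if not path.isfile(resultpath):
        return None
    with open(resultpath) as f:
        return f.read()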
Example 3
    def _run_compare_autoclass(self, exepath, outvar, coderoot, caseid):
        """Compares an output *folder* from an executable (for a variable saved using
        the autoclass feature) with its model output using settings in the doctag.

        :arg exepath: the full path to the output folder from the unit test.
        :arg outvar: the TestOutput instance with testing specifications.
        :arg coderoot: the full path to the folder that has all the code files.
        :arg caseid: the identifier for the specific test case being tested.
        """
        #We need to get a list of all the files in the model and test folders
        #for the variable and then compare each of them with _run_compare_file.
        #Luckily, there are no nested folders; the files are saved flat and the
        #filenames encode the variable's nested structure.
        from os import walk, path, mkdir
        modelpath = outvar.abspath(coderoot, caseid)
        mfiles = []
        xfiles = []
        for (dirpath, dirnames, filenames) in walk(modelpath):
            mfiles.extend(filenames)
            break

        for (dirpath, dirnames, filenames) in walk(exepath):
            xfiles.extend(filenames)
            break

        #Create a directory for all the .compare files of the member variables
        #in the auto-classed variable.
        compath = exepath + ".compare"
        if not path.isdir(compath):
            mkdir(compath)

        #Hopefully we get a one-to-one match; otherwise we have to record the
        #set difference as failures.
        onlym = []
        mx = []
        summary = {}
        for m in mfiles:
            if m[0] != "_":
                #Ignore the files that don't follow the convention; otherwise the
                #statistics will be messed up.
                continue
            if m in xfiles:
                xpath = path.join(exepath, m)
                mpath = path.join(modelpath, m)
                mxres = self.comparer.compare(xpath, mpath, outvar.template,
                                              outvar.mode)
                mx.append(mxres)
                summary[m] = (mxres.percent_match, mxres.common_match)

                #Write a comparison report for this particular variable.
                cpath = path.join(compath, m)
                with open(cpath, "w") as f:
                    if mxres is not None:
                        f.write(print_compare_result(mxres, self.verbose))
                    else:
                        f.write(
                            "The result comparison failed. Check the unit test console output."
                        )
            else:
                onlym.append(m)
        onlyx = [f for f in xfiles if (f[0] == "_" and f not in mfiles)]

        #It turns out to be useful to have a summary file that lists the percentage
        #for each file comparison. Otherwise, it is hard to track down where the errors
        #have occurred.
        from fortpy.testing.results import ACResult
        reltot = len(summary) + len(onlyx) + len(onlym)
        result = ACResult(mx, onlym, onlyx, outvar.actolerance)
        slines = [
            "{0:.2%} success ({1:.2%} common) TOTAL MATCH".format(
                result.percent_match, result.common_match), "",
            "OUTPUT : {}".format(exepath), "MODEL  : {}".format(modelpath), "",
            "Files in both MODEL and TEST directories ({}/{}):".format(
                len(summary), reltot)
        ]
        for mfile, match in summary.items():
            slines.append("{0} => {1:.2%} success ({2:.2%} common)".format(
                mfile, match[0], match[1]))

        if len(onlym) > 0:
            slines.append("")
            slines.append(
                "Files only present in the MODEL output ({}/{}):".format(
                    len(onlym), reltot))
            slines.extend(onlym)
        if len(onlyx) > 0:
            slines.append("")
            slines.append(
                "Files only present in the TEST output ({}/{}):".format(
                    len(onlyx), reltot))
            slines.extend(onlyx)

        sfile = path.join(compath, "summary")
        with open(sfile, 'w') as f:
            f.write('\n'.join(slines))

        return result
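Both file lists above are gathered with the os.walk-plus-break idiom, which visits only the top level of a directory and so matches the comment that the files are saved flat. A standalone sketch of the idiom, together with an os.listdir equivalent:

from os import walk, path, listdir

def toplevel_files(folder):
    """Return the names of the files directly inside `folder`, ignoring subfolders."""
    files = []
    for (dirpath, dirnames, filenames) in walk(folder):
        files.extend(filenames)
        break  # only the first iteration, i.e. the top level
    return files

def toplevel_files_alt(folder):
    """Equivalent using listdir: keep only entries that are regular files."""
    return [f for f in listdir(folder) if path.isfile(path.join(folder, f))]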
Example 4
    def _run_compare_autoclass(self, exepath, outvar, coderoot, caseid):
        """Compares an output *folder* from an executable (for a variable saved using
        the autoclass feature) with its model output using settings in the doctag.

        :arg exepath: the full path to the output folder from the unit test.
        :arg outvar: the TestOutput instance with testing specifications.
        :arg coderoot: the full path to the folder that has all the code files.
        :arg caseid: the identifier for the specific test case being tested.
        """
        #We need to get a list of all the files in the model and test folders
        #for the variable and then compare each of them with _run_compare_file.
        #Luckily, there are no nested folders; the files are saved flat and the
        #filenames encode the variable's nested structure.
        from os import walk, path, mkdir
        modelpath = outvar.abspath(coderoot, caseid)
        mfiles = []
        xfiles = []
        for (dirpath, dirnames, filenames) in walk(modelpath):
            mfiles.extend(filenames)
            break

        for (dirpath, dirnames, filenames) in walk(exepath):
            xfiles.extend(filenames)
            break

        #Create a directory for all the .compare files of the member variables
        #in the auto-classed variable.
        compath = exepath + ".compare"
        if not path.isdir(compath):
            mkdir(compath)

        #Hopefully we get a one-to-one match; otherwise we have to record the
        #set difference as failures.
        onlym = []
        mx = []
        for m in mfiles:
            if m[0] != "_":
                #Ignore the files that don't follow the convention; otherwise the
                #statistics will be messed up.
                continue
            if m in xfiles:
                xpath = path.join(exepath, m)
                mpath = path.join(modelpath, m)
                mxres = self.comparer.compare(xpath, mpath, outvar.template, outvar.mode)
                mx.append(mxres)

                #Write a comparison report for this particular variable.
                cpath = path.join(compath, m)
                with open(cpath, "w") as f:
                    if mxres is not None:
                        f.write(print_compare_result(mxres, self.verbose))
                    else:
                        f.write("The result comparison failed. Check the unit test console output.")
            else:
                onlym.append(m)
        onlyx = [f for f in xfiles if (f[0] == "_" and f not in mfiles)]

        #Write a separate file for each of the 'only' lists to record the set differences.
        if len(onlym) > 0:
            with open(path.join(compath, "model_only"), 'w') as f:
                f.write('\n'.join(onlym))
        if len(onlyx) > 0:
            with open(path.join(compath, "test_only"), 'w') as f:
                f.write('\n'.join(onlyx))

        from fortpy.testing.results import ACResult
        return ACResult(mx, onlym, onlyx, outvar.actolerance)
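The onlym/onlyx bookkeeping is a set difference restricted to files that follow the leading-underscore convention. The same partition can be written with Python sets; a small illustrative sketch (not fortpy code):

def split_by_presence(mfiles, xfiles):
    """Partition convention-following files (leading underscore) into those in
    both folders, only the model folder, or only the test folder."""
    model = {f for f in mfiles if f.startswith("_")}
    test = {f for f in xfiles if f.startswith("_")}
    return sorted(model & test), sorted(model - test), sorted(test - model)

both, only_model, only_test = split_by_presence(["_a", "_b", "notes.txt"], ["_b", "_c"])
# both == ["_b"], only_model == ["_a"], only_test == ["_c"]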
Example 5
    def _run_compare_autoclass(self, exepath, outvar, coderoot, caseid):
        """Compares an output *folder* from an executable (for a variable saved using
        the autoclass feature) with its model output using settings in the doctag.

        :arg exepath: the full path to the output folder from the unit test.
        :arg outvar: the TestOutput instance with testing specifications.
        :arg coderoot: the full path to the folder that has all the code files.
        :arg caseid: the identifier for the specific test case being tested.
        """
        #We need to get a list of all the files in the model and test folders
        #for the variable and then compare each of them with _run_compare_file.
        #Luckily, there are no nested folders; the files are saved flat and the
        #filenames encode the variable's nested structure.
        from os import walk, path, mkdir
        modelpath = outvar.abspath(coderoot, caseid, self.compiler)
        mfiles = []
        xfiles = []
        for (dirpath, dirnames, filenames) in walk(modelpath):
            mfiles.extend(filenames)
            break

        for (dirpath, dirnames, filenames) in walk(exepath):
            xfiles.extend(filenames)
            break

        #Create a directory for all the .compare files of the member variables
        #in the auto-classed variable.
        compath = exepath + ".compare"
        if not path.isdir(compath):
            mkdir(compath)

        #Hopefully we get a one-to-one match; otherwise we have to record the
        #set difference as failures.
        onlym = []
        mx = []
        summary = {}
        for m in mfiles:
            if m[0] != "_" or ".fpy" in m:
                #Ignore the files that don't follow the convention; otherwise the
                #statistics will be messed up.
                continue
            if m in xfiles:
                xpath = path.join(exepath, m)
                mpath = path.join(modelpath, m)
                #We can't use the outvar template for autoclass because the contents
                #probably all have different types. Instead, we have to detect the
                #template from the file headers.
                mxres = self.comparer.compare(xpath, mpath, None, outvar.mode)
                mx.append(mxres)
                summary[m] = (mxres.percent_match, mxres.common_match)

                #Write a comparison report for this particular variable.
                cpath = path.join(compath, m)
                with open(cpath, "w") as f:
                    if mxres is not None:
                        f.write(print_compare_result(mxres, self.verbose))
                    else:
                        f.write("The result comparison failed. Check the unit test console output.")
            else:
                onlym.append(m)
        onlyx = [f for f in xfiles if (f[0] == "_" and ".fpy" not in f and f not in mfiles)]

        #It turns out to be useful to have a summary file that lists the percentage
        #for each file comparison. Otherwise, it is hard to track down where the errors
        #have occurred.
        from fortpy.testing.results import ACResult
        reltot = len(summary) + len(onlyx) + len(onlym)
        result = ACResult(mx, onlym, onlyx, outvar.actolerance)
        slines = ["{0:.2%} success ({1:.2%} common) TOTAL MATCH".format(result.percent_match, result.common_match),
                  "",
                  "OUTPUT : {}".format(exepath),
                  "MODEL  : {}".format(modelpath),
                  "",
                  "Files in both MODEL and TEST directories ({}/{}):".format(len(summary), reltot)]
        for mfile, match in summary.items():
            slines.append("{0} => {1:.2%} success ({2:.2%} common)".format(mfile, match[0], match[1]))

        if len(onlym) > 0:
            slines.append("")
            slines.append("Files only present in the MODEL output ({}/{}):".format(len(onlym), reltot))
            slines.extend(onlym)
        if len(onlyx) > 0:
            slines.append("")
            slines.append("Files only present in the TEST output ({}/{}):".format(len(onlyx), reltot))
            slines.extend(onlyx)

        sfile = path.join(compath, "summary")
        with open(sfile, 'w') as f:
            f.write('\n'.join(slines))

        return result
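The summary file leans on the {:.2%} format specifier, which multiplies a fraction by 100 and appends a percent sign. A tiny self-contained illustration with dummy numbers (the real values come from the comparison results and the ACResult instance):

summary = {"_member1.dat": (0.9987, 1.0), "_member2.dat": (0.5, 0.75)}
slines = ["{0:.2%} success ({1:.2%} common) TOTAL MATCH".format(0.8123, 0.9)]
for mfile, match in summary.items():
    slines.append("{0} => {1:.2%} success ({2:.2%} common)".format(mfile, match[0], match[1]))
print('\n'.join(slines))
# 81.23% success (90.00% common) TOTAL MATCH
# _member1.dat => 99.87% success (100.00% common)
# _member2.dat => 50.00% success (75.00% common)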