def compareTimelines(self, data, reference, fields):
    """Compare timeline data and return the statistics
    @param data: the name of the data directory
    @param reference: the name of the directory with the reference data
    @param fields: list of the fields to compare"""
    sample = TimelinePlot(args=[self.caseDir,
                                "--silent",
                                "--dir=" + data,
                                "--reference-dir=" + reference,
                                "--compare",
                                "--basic-mode=lines",
                                "--metrics"] +
                          ["--field=" + f for f in fields])
    return Data2DStatistics(metrics=sample["metrics"],
                            compare=sample["compare"],
                            noStrings=True,
                            failureValue=0)
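def postRunTestPressureTimelines(self):
    # A minimal usage sketch for compareTimelines(), NOT part of the
    # original source: the method name, the directory names
    # "postProcessing"/"referenceTimelines" and the tolerance key
    # "pressureRelativeTolerance" are assumptions for illustration.
    # The row/column iteration mirrors the velocity-profile example
    # further below.
    stat = self.compareTimelines(data="postProcessing",
                                 reference="referenceTimelines",
                                 fields=["p"])
    relError = stat.relativeError()
    for l in relError.columns():
        for f in relError.rows():
            self.isEqual(value=relError[(f, l)],
                         tolerance=self["pressureRelativeTolerance"],
                         message="Match %s on %s" % (f, l))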
def compareSamples(self, data, reference, fields,
                   time=None, line=None,
                   scaleData=1, offsetData=0,
                   scaleX=1, offsetX=0,
                   useReferenceForComparison=False):
    """Compare sample data and return the statistics
    @param data: the name of the data directory
    @param reference: the name of the directory with the reference data
    @param fields: list of the fields to compare
    @param time: the time to compare for. If unset the latest time is used
    @param line: if set, only compare data on this sample line
    @param scaleData: factor to scale the data values with
    @param offsetData: offset to add to the data values
    @param scaleX: factor to scale the x-coordinates with
    @param offsetX: offset to add to the x-coordinates
    @param useReferenceForComparison: use the reference data as the basis of the comparison"""
    timeOpt = ["--latest-time"]
    if time:
        timeOpt = ["--time=" + str(time)]
    if line:
        timeOpt += ["--line=%s" % line]
    addOpt = []
    if useReferenceForComparison:
        addOpt.append("--use-reference-for-comparison")
    sample = SamplePlot(args=[self.caseDir,
                              "--silent",
                              "--dir=" + data,
                              "--reference-dir=" + reference,
                              "--tolerant-reference-time",
                              "--compare",
                              "--index-tolerant-compare",
                              "--common-range-compare",
                              "--metrics",
                              "--scale-data=%f" % scaleData,
                              "--scale-x=%f" % scaleX,
                              "--offset-data=%f" % offsetData,
                              "--offset-x=%f" % offsetX] +
                        timeOpt + addOpt +
                        ["--field=" + f for f in fields])
    return Data2DStatistics(metrics=sample["metrics"],
                            compare=sample["compare"],
                            noStrings=True,
                            failureValue=0)
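def postRunTestScaledProfiles(self):
    # A hypothetical usage sketch of compareSamples(), NOT part of the
    # original source: the method name, the time value and the scale
    # factor are assumptions. Compares U and p at t=100 against the
    # reference set, doubling the sampled data values before comparing.
    stat = self.compareSamples(data="sets",
                               reference="referenceSet",
                               fields=["U", "p"],
                               time=100,
                               scaleData=2)
    print_(stat.relativeError())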
def postRunTestVelocityProfilesOldSchool(self):
    # Just an example of how to do it in detail
    sample = SamplePlot(args=[self.caseDir,
                              "--silent",
                              "--dir=sets",
                              "--reference-dir=referenceSet",
                              "--latest-time",
                              "--tolerant-reference-time",
                              "--field=U",
                              "--compare",
                              "--metrics"])
    stat = Data2DStatistics(metrics=sample["metrics"],
                            compare=sample["compare"],
                            noStrings=True)
    relError = stat.relativeError()
    for l in relError.columns():
        for com in relError.rows():
            self.isEqual(value=relError[(com, l)],
                         tolerance=self["velocityRelativeTolerance"],
                         message="Match velocity component %s on line %s" % (com, l))
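def postRunTestVelocityProfiles(self):
    # A sketch (method name is hypothetical) of roughly the same check
    # expressed through the compareSamples() helper above. Not exactly
    # equivalent: the helper passes additional comparison flags such as
    # --index-tolerant-compare and --common-range-compare.
    stat = self.compareSamples(data="sets",
                               reference="referenceSet",
                               fields=["U"])
    relError = stat.relativeError()
    for l in relError.columns():
        for com in relError.rows():
            self.isEqual(value=relError[(com, l)],
                         tolerance=self["velocityRelativeTolerance"],
                         message="Match velocity component %s on line %s" % (com, l))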
def run(self):
    data = self.readPickledData()
    result = {"originalData": data}
    if self.opts.metricsName in data:
        metrics = data[self.opts.metricsName]
    else:
        self.error("Metrics set", self.opts.metricsName,
                   "not in", list(data.keys()))
    if self.opts.metricsName == self.opts.compareName:
        self.warning("Metrics and comparison", self.opts.compareName,
                     "are the same. No comparison used")
        self.opts.compareName = None
    if self.opts.compareName is None:
        compare = None
    elif self.opts.compareName in data:
        compare = data[self.opts.compareName]
    else:
        self.error("Compare set", self.opts.compareName,
                   "not in", list(data.keys()))
    stat = Data2DStatistics(metrics,
                            compare=compare,
                            small=self.opts.smallThreshold)
    result["statistics"] = stat
    for f in self.opts.field:
        print_("\nField", f)
        try:
            val = stat[f]
            print_(val)
            result[f] = val
        except KeyError:
            print_(" .... not present in", stat.names())
    for f in self.opts.function:
        for v in self.opts.field:
            print_("\nFunction", f, "on field", v)
            try:
                val = stat.func(f, v)
                print_(val)
                result["%s on %s" % (f, v)] = val
            except KeyError:
                print_(" .... not present in", stat.names())
    if self.opts.relativeError:
        print_("\nRelative Error")
        val = stat.relativeError()
        print_(val)
        result["relativeError"] = val
    if self.opts.relativeAverageError:
        print_("\nRelative Average Error")
        val = stat.relativeAverageError()
        print_(val)
        result["relativeAverageError"] = val
    if self.opts.range:
        print_("\nData range")
        val = stat.range()
        print_(val)
        result["dataRange"] = val
    self.setData(result)
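def addOptions(self):
    # A hypothetical sketch of the addOptions() counterpart that run()
    # above relies on. Only the dest names are taken from run(); the
    # option strings, defaults and help texts are assumptions made for
    # illustration, not the actual source.
    from optparse import OptionGroup
    data = OptionGroup(self.parser, "Data", "Which data sets to process")
    self.parser.add_option_group(data)
    data.add_option("--metrics-name", action="store", dest="metricsName",
                    default="metrics",
                    help="Name of the metrics data set in the pickled file")
    data.add_option("--compare-name", action="store", dest="compareName",
                    default=None,
                    help="Name of the data set to compare against")
    data.add_option("--small-threshold", action="store", type="float",
                    dest="smallThreshold", default=1e-10,
                    help="Values below this threshold are treated as zero")
    what = OptionGroup(self.parser, "Output", "Which statistics to print")
    self.parser.add_option_group(what)
    what.add_option("--field", action="append", dest="field", default=[],
                    help="Field to report on (can be used more than once)")
    what.add_option("--function", action="append", dest="function", default=[],
                    help="Statistics function to apply to every field")
    what.add_option("--relative-error", action="store_true",
                    dest="relativeError", default=False,
                    help="Print the relative error")
    what.add_option("--relative-average-error", action="store_true",
                    dest="relativeAverageError", default=False,
                    help="Print the relative average error")
    what.add_option("--range", action="store_true", dest="range",
                    default=False,
                    help="Print the data range")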