def _postAnalyze(self): """ Write the logs. """ self._paths = [] self.logger.write( '%s\nFRACTIONAL ERROR (Measured vs Entered)' % ('='*80)) self._process('Error', 'wDev', 'lDev', self.trackDeviations) self.logger.write('%s\nFRACTIONAL UNCERTAINTY ERROR' % ('='*80)) self._process( 'Uncertainty Error', 'wDelta', 'lDelta', None, absoluteOnly=True) csv = CsvWriter( path=self.getPath('Length-Width-Deviations.csv'), autoIndexFieldName='Index', fields=[ ('uid', 'UID'), ('fingerprint', 'Fingerprint'), ('wDelta', 'Width Deviation'), ('lDelta', 'Length Deviation') ]) for entry in self.entries: track = entry['track'] csv.createRow( uid=track.uid, fingerprint=track.fingerprint, wDelta=entry.get('wDelta', -1.0), lDelta=entry.get('lDelta', -1.0) ) csv.save() self._processAspectRatios() self.mergePdfs(self._paths)
def _analyzeTrackway(self, trackway, sitemap):
    """
    @param trackway: trackway whose track entries are exported to CSV
    @param sitemap: sitemap containing the trackway
    @return:
    """
    self.entries = dict(lp=[], rp=[], lm=[], rm=[])

    super(SimulationCsvExporterStage, self)._analyzeTrackway(
        trackway=trackway,
        sitemap=sitemap)

    csv = CsvWriter(
        autoIndexFieldName='Index',
        fields=[
            'lp_name', 'lp_uid', 'lp_x', 'lp_dx', 'lp_y', 'lp_dy',
            'rp_name', 'rp_uid', 'rp_x', 'rp_dx', 'rp_y', 'rp_dy',
            'lm_name', 'lm_uid', 'lm_x', 'lm_dx', 'lm_y', 'lm_dy',
            'rm_name', 'rm_uid', 'rm_x', 'rm_dx', 'rm_y', 'rm_dy'])

    length = max(
        len(self.entries['lp']),
        len(self.entries['rp']),
        len(self.entries['lm']),
        len(self.entries['rm']))

    # Build one row per track index, padding limbs that have fewer entries
    # with blank entries so every row contains all four limbs.
    for index in range(length):
        items = []
        for limb_id, entries in self.entries.items():
            if index < len(entries):
                items += entries[index].items()
            else:
                items += self._create_entry(limb_id).items()
        csv.addRow(dict(items))

    path = self.owner.settings.fetch('EXPORT_DATA_PATH')
    if path is None:
        path = self.owner.getLocalPath('Simulation', 'data', isFile=True)
    path = FileUtils.makeFilePath(path, trackway.name, 'source.csv')

    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        os.makedirs(directory)

    if csv.save(path):
        print('[SAVED]:', path)
    else:
        print('[ERROR]: Unable to save CSV at "{}"'.format(path))
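# The method above calls a _create_entry helper that is not included in this
# fragment. A minimal sketch of what such a helper might look like follows;
# the blank-string defaults are an assumption, not the project's actual
# implementation.
def _create_entry(self, limb_id):
    """Returns a blank entry used to pad rows for limbs with fewer tracks."""
    return {
        '{}_{}'.format(limb_id, suffix): ''
        for suffix in ('name', 'uid', 'x', 'dx', 'y', 'dy')}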
def _process(
        self, label, widthKey, lengthKey, trackDeviations,
        absoluteOnly=False
):
    """ Plots deviation histograms for the given width/length entry keys and
        writes a CSV of tracks whose deviations are statistically
        significant. """
    pl = self.plot

    ws = []
    ls = []
    w2D = []
    l2D = []

    for entry in self.entries:
        if widthKey in entry:
            ws.append(entry[widthKey])
            if lengthKey in entry:
                w2D.append(entry[widthKey])
        if lengthKey in entry:
            ls.append(entry[lengthKey])
            if widthKey in entry:
                l2D.append(entry[lengthKey])

    plotList = [
        ('widths', ws, 'Width', 'b'),
        ('lengths', ls, 'Length', 'r')]

    wRes = NumericUtils.getMeanAndDeviation(ws)
    self.logger.write('Width %ss' % wRes.label)
    lRes = NumericUtils.getMeanAndDeviation(ls)
    self.logger.write('Length %ss' % lRes.label)

    for data in plotList:
        if not absoluteOnly:
            d = data[1]
            self._paths.append(self._makePlot(
                label, d, data, histRange=(-1.0, 1.0)))
            self._paths.append(self._makePlot(
                label, d, data, isLog=True, histRange=(-1.0, 1.0)))

        # noinspection PyUnresolvedReferences
        d = np.absolute(np.array(data[1]))
        self._paths.append(self._makePlot(
            'Absolute ' + label, d, data, histRange=(0.0, 1.0)))
        self._paths.append(self._makePlot(
            'Absolute ' + label, d, data, isLog=True, histRange=(0.0, 1.0)))

    self.owner.createFigure('twoD')
    pl.hist2d(w2D, l2D, bins=20, range=([-1, 1], [-1, 1]))
    pl.title('2D %s Distribution' % label)
    pl.xlabel('Width %s' % label)
    pl.ylabel('Length %s' % label)
    pl.xlim(-1.0, 1.0)
    pl.ylim(-1.0, 1.0)
    path = self.getTempPath(
        '%s.pdf' % StringUtils.getRandomString(16), isFile=True)
    self.owner.saveFigure('twoD', path)
    self._paths.append(path)

    csv = CsvWriter()
    csv.path = self.getPath(
        '%s-Deviations.csv' % label.replace(' ', '-'), isFile=True)
    csv.addFields(
        ('uid', 'UID'),
        ('fingerprint', 'Fingerprint'),
        ('wSigma', 'Width Deviation'),
        ('lSigma', 'Length Deviation'))

    count = 0
    for entry in self.entries:
        widthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(widthKey, 0.0)/wRes.uncertainty), -2)
        lengthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(lengthKey, 0.0)/lRes.uncertainty), -1)

        if widthDevSigma > 2.0 or lengthDevSigma > 2.0:
            count += 1
            track = entry['track']
            data = dict(
                wSigma=widthDevSigma,
                lSigma=lengthDevSigma)

            if trackDeviations is not None:
                trackDeviations[track.uid] = data

            csv.createRow(
                uid=track.uid,
                fingerprint=track.fingerprint,
                **data)

    if not csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file to %s' % csv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(count)/float(len(self.entries)), -2)
    self.logger.write('%s significant %ss (%s%%)' % (
        count, label.lower(), percentage))

    # More than ~4.55% of tracks beyond 2 sigma exceeds what a normal
    # distribution predicts.
    if percentage > (100.0 - 95.45):
        self.logger.write(
            '[WARNING]: Large deviation count exceeds '
            'normal distribution expectations.')
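# A quick numerical check of the 2-sigma threshold used above: for a normal
# distribution roughly 95.45% of samples fall within two standard deviations
# of the mean, so flagging cases where the "significant" fraction exceeds
# 100.0 - 95.45 = 4.55% matches that expectation. This snippet is purely
# illustrative and is not part of the analysis stage.
import numpy as np

samples = np.random.normal(loc=0.0, scale=1.0, size=100000)
beyond_two_sigma = np.mean(np.abs(samples) > 2.0)
print('Fraction beyond 2 sigma: %.4f (expected ~0.0455)' % beyond_two_sigma)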
class SimpleGaugeStage(CurveOrderedAnalysisStage):
    """ Analysis stage that computes a simple gauge measurement for each pes
        track and aggregates the results by trackway. """

#===============================================================================
# C L A S S

    _GAUGE_DATA_NT = namedtuple(
        'GAUGE_DATA_NT', ['abs', 'width', 'pace', 'stride'])

#_______________________________________________________________________________
    def __init__(self, key, owner, **kwargs):
        """Creates a new instance of SimpleGaugeStage."""
        super(SimpleGaugeStage, self).__init__(
            key, owner, label='Simple Track Gauge', **kwargs)
        self._paths = []
        self._errorTracks = []
        self._ignoreTracks = []
        self._trackwayGauges = None
        self._count = 0
        self._trackwayCsv = None

#===============================================================================
# P R O T E C T E D

#_______________________________________________________________________________
    def _preAnalyze(self):
        self._trackwayGauges = self._GAUGE_DATA_NT([], [], [], [])
        self._paths = []
        self._errorTracks = []
        self._ignoreTracks = []
        self._count = 0

        self._trackwayCsv = CsvWriter(
            path=self.getPath('Trackway-Gauge-Averages.csv'),
            autoIndexFieldName='Index',
            fields=[
                ('name', 'Name'),
                ('count', 'Pes Count'),
                ('abs', 'Absolute'),
                ('absUnc', 'Abs Unc'),
                ('stride', 'Stride Norm'),
                ('strideUnc', 'Stride Unc'),
                ('pace', 'Pace Norm'),
                ('paceUnc', 'Pace Unc'),
                ('width', 'Width Norm'),
                ('widthUnc', 'Width Unc')])

#_______________________________________________________________________________
    def _analyzeSitemap(self, sitemap):
        self._createDrawing(sitemap, 'SIMPLE-GAUGE', 'Simple-Gauge')
        super(SimpleGaugeStage, self)._analyzeSitemap(sitemap)
        self._saveDrawing(sitemap)

#_______________________________________________________________________________
    def _analyzeTrackway(self, trackway, sitemap):
        bundle = self.owner.getSeriesBundle(trackway)
        if not bundle.isReady:
            # Skip trackways that have incomplete series
            return

        data = self._collectGaugeData(bundle, trackway, sitemap)
        if data['gauges'].abs:
            self._processGaugeData(bundle, trackway, data)

#_______________________________________________________________________________
    def _collectGaugeData(self, bundle, trackway, sitemap):
        """ Collects the trackway gauge data by generating projections for
            each series and then iterating over every track in the trackway
            and extracting the gauge information from the CurveSeries
            projection data.
            @param bundle: TrackSeriesBundle
            @param trackway: Tracks_Trackway
            @param sitemap: Tracks_Sitemap
            @return: dict
        """
        trackway.cache.set('data', {
            'points':[],
            'gauges':self._GAUGE_DATA_NT([], [], [], [])})

        for key, series in bundle.items():
            series.cache.set('referenceWidth', series.averageTrackWidth)
            if series.count < 2:
                continue

            curve = CurveSeries(stage=self, series=series)
            try:
                curve.compute()
            except Exception as err:
                self.logger.writeError([
                    '[ERROR]: Failed to compute track curve projection',
                    'TRACKWAY: %s' % trackway.name,
                    'SERIES: %s[%s]' % (series.fingerprint, series.count)
                ], err)
                raise

            curve.draw(sitemap.cache.get('drawing'), drawPairs=False)
            series.cache.set('curve', curve)

        super(SimpleGaugeStage, self)._analyzeTrackway(
            trackway=trackway, sitemap=sitemap)

        for key, series in bundle.items():
            series.cache.remove('referenceWidth')

        return trackway.cache.extract('data')

#_______________________________________________________________________________
    def _processGaugeData(self, bundle, trackway, data):
        pesCount = bundle.leftPes.count + bundle.rightPes.count
        record = {'name':trackway.name, 'count':pesCount}
        gaugeData = data['gauges']

        try:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.abs)
            record['abs'] = value.value
            record['absUnc'] = value.uncertainty
            self._trackwayGauges.abs.append((pesCount, value))
        except ZeroDivisionError:
            return

        widthValue = NumericUtils.getWeightedMeanAndDeviation(gaugeData.width)
        record['width'] = widthValue.value
        record['widthUnc'] = widthValue.uncertainty
        self._trackwayGauges.width.append((pesCount, widthValue))

        if gaugeData.pace:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.pace)
            record['pace'] = value.value
            record['paceUnc'] = value.uncertainty
            self._trackwayGauges.pace.append((pesCount, value))
        else:
            record['pace'] = ''
            record['paceUnc'] = ''

        if gaugeData.stride:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.stride)
            record['stride'] = value.value
            record['strideUnc'] = value.uncertainty
            self._trackwayGauges.stride.append((pesCount, value))
        else:
            record['stride'] = ''
            record['strideUnc'] = ''

        self._trackwayCsv.addRow(record)

        plot = ScatterPlot(
            data=data['points'],
            title='%s Width-Normalized Gauges (%s)' % (
                trackway.name, widthValue.label),
            xLabel='Track Position (m)',
            yLabel='Gauge (AU)')
        self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))

        analysisTrackway = trackway.getAnalysisPair(self.analysisSession)
        analysisTrackway.simpleGauge = widthValue.raw
        analysisTrackway.simpleGaugeUnc = widthValue.rawUncertainty

#_______________________________________________________________________________
    def _analyzeTrack(self, track, series, trackway, sitemap):
        if series.count < 1 or not track.pes:
            return

        segmentPair = None
        segmentSeries = None
        skipped = 0

        for key, otherSeries in series.bundle.items():
            if not otherSeries.pes:
                continue

            if (otherSeries == series
                    or otherSeries.left == series.left
                    or otherSeries.count < 2):
                # If the series isn't suitable for comparison then mark this
                # as a skipped attempt and continue.
                skipped += 1
                continue

            segment = otherSeries.cache.get('curve').getTrackSegment(track)
            if segment is None:
                continue

            for pair in segment.pairs:
                if pair['track'] != track:
                    continue
                if segmentPair is None or (
                        pair['line'].length.raw <
                        segmentPair['line'].length.raw):
                    # Store the shortest of the available gauge lengths
                    segmentPair = pair
                    segmentSeries = otherSeries
                break

        if skipped == 4:
            # If skipped is 4 it means that no suitable series existed for
            # calculating a gauge value and the method should abort quietly
            self._ignoreTracks.append(track)
            return

        if segmentPair is None:
            self._errorTracks.append(track)
            return

        color = 'blue' if segmentSeries.pes == series.pes else 'orange'

        data = trackway.cache.get('data')
        gauges = data['gauges']
        line = segmentPair['line']
        sitemap.cache.get('drawing').lineSegment(
            line, stroke=color, stroke_width=1, stroke_opacity='0.5')

        length = line.length
        gauges.abs.append(length)

        analysisTrack = track.getAnalysisPair(self.analysisSession)
        analysisTrack.simpleGauge = line.length.raw
        analysisTrack.simpleGaugeUnc = line.length.rawUncertainty

        widthNormGauge = NumericUtils.divideValueUncertainties(
            numeratorValue=length,
            denominatorValue=series.cache.get('referenceWidth'))
        gauges.width.append(widthNormGauge)

        point = PositionValue2D(
            x=analysisTrack.curvePosition, xUnc=0.0,
            y=widthNormGauge.value, yUnc=widthNormGauge.uncertainty)
        data['points'].append(point)

        if analysisTrack.paceLength:
            gauges.pace.append(NumericUtils.divideValueUncertainties(
                length, analysisTrack.paceLengthValue))

        if analysisTrack.strideLength:
            gauges.stride.append(NumericUtils.divideValueUncertainties(
                length, analysisTrack.strideLengthValue))

        self._count += 1

#_______________________________________________________________________________
    def _postAnalyze(self):
        self.logger.write('%s gauge calculated tracks' % self._count)
        self._trackwayCsv.save()

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Errors.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint')
            ])
        for track in self._errorTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._errorTracks:
            self.logger.write(
                'Failed to calculate gauge for %s tracks' % len(
                    self._errorTracks))

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Ignores.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint')
            ])
        for track in self._ignoreTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._ignoreTracks:
            self.logger.write(
                '%s tracks lacked suitability for gauge calculation' % len(
                    self._ignoreTracks))

        plotData = [
            ('stride', 'red', 'AU', 'Stride-Normalized Weighted'),
            ('pace', 'green', 'AU', 'Pace-Normalized Weighted'),
            ('width', 'blue', 'AU', 'Width-Normalized Weighted'),
            ('abs', 'purple', 'm', 'Absolute Unweighted')
        ]
        for data in plotData:
            out = []
            source = ListUtils.sortListByIndex(
                source=getattr(
                    self._trackwayGauges, StringUtils.toStr2(data[0])),
                index=0,
                inPlace=True)
            for item in source:
                out.append(PositionValue2D(
                    x=len(out), y=item[1].value, yUnc=item[1].uncertainty))
            self._plotTrackwayGauges(out, *data[1:])

        self.mergePdfs(self._paths, 'Gauges.pdf')

#_______________________________________________________________________________
    def _plotTrackwayGauges(self, points, color, unit, heading):
        histData = []
        for p in points:
            histData.append(p.yValue.value)

        plot = Histogram(
            data=histData,
            title='%s Trackway Gauges' % heading,
            xLabel='Averaged Trackway Gauge (%s)' % unit,
            yLabel='Frequency',
            color=color)
        self._paths.insert(0, plot.save(self.getTempFilePath(extension='pdf')))

        plot = ScatterPlot(
            data=ListUtils.sortObjectList(points, 'y', inPlace=True),
            title='%s Trackway Gauges' % heading,
            xLabel='Trackway Pes Count (#)',
            yLabel='Averaged Trackway Gauge (%s)' % unit,
            color=color)
        self._paths.insert(0, plot.save(self.getTempFilePath(extension='pdf')))
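# The gauge math above leans on two NumericUtils helpers whose internals are
# not shown in this fragment: divideValueUncertainties and
# getWeightedMeanAndDeviation. The sketch below shows the standard formulas
# such helpers typically implement (first-order error propagation for a
# quotient and an inverse-variance weighted mean). It is an illustrative
# assumption about their behavior, not the project's actual implementation.
import math
from collections import namedtuple

ValueUncertainty = namedtuple('ValueUncertainty', ['value', 'uncertainty'])

def divide_with_uncertainty(numerator, denominator):
    """Quotient of two uncertain values with first-order error propagation."""
    value = numerator.value/denominator.value
    uncertainty = abs(value)*math.sqrt(
        (numerator.uncertainty/numerator.value)**2 +
        (denominator.uncertainty/denominator.value)**2)
    return ValueUncertainty(value, uncertainty)

def weighted_mean_and_deviation(values):
    """Inverse-variance weighted mean of a list of uncertain values."""
    weights = [1.0/(v.uncertainty**2) for v in values]
    total = sum(weights)
    mean = sum(w*v.value for w, v in zip(weights, values))/total
    return ValueUncertainty(mean, math.sqrt(1.0/total))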
def write_to_file(trackway, tracks_data):
    """
    @param trackway: trackway object whose name is used for the CSV file
    @param tracks_data: dict mapping the limb keys 'lp', 'rp', 'lm' and 'rm'
        to ordered lists of track data entries
    @return:
    """
    csv = CsvWriter(
        path="{}.csv".format(trackway.name),
        autoIndexFieldName="Index",
        fields=[
            "lp_name", "lp_uid", "lp_x", "lp_dx", "lp_y", "lp_dy",
            "lp_w", "lp_dw", "lp_l", "lp_dl",
            "rp_name", "rp_uid", "rp_x", "rp_dx", "rp_y", "rp_dy",
            "rp_w", "rp_dw", "rp_l", "rp_dl",
            "lm_name", "lm_uid", "lm_x", "lm_dx", "lm_y", "lm_dy",
            "lm_w", "lm_dw", "lm_l", "lm_dl",
            "rm_name", "rm_uid", "rm_x", "rm_dx", "rm_y", "rm_dy",
            "rm_w", "rm_dw", "rm_l", "rm_dl",
        ],
    )

    count = max([len(ts) if ts else 0 for ts in tracks_data.values()])

    for i in range(count):
        entry = {}
        for key in ["lp", "rp", "lm", "rm"]:
            data = tracks_data[key][i] if i < len(tracks_data[key]) else None
            track = data["track"] if data else None

            entry.update({
                "{}_name".format(key): track.fingerprint if track else "",
                "{}_uid".format(key): track.uid if track else "",
            })

            point = track.positionValue if track else None
            entry.update({
                "{}_x".format(key): point.x if point else "",
                "{}_dx".format(key): point.xUnc if point else "",
                "{}_y".format(key): point.y if point else "",
                "{}_dy".format(key): point.yUnc if point else "",
            })

            length = track.lengthValue if track else None
            entry.update({
                "{}_l".format(key): length.value if length else "",
                "{}_dl".format(key): length.uncertainty if length else "",
            })

            width = track.widthValue if track else None
            entry.update({
                "{}_w".format(key): width.value if width else "",
                "{}_dw".format(key): width.uncertainty if width else "",
            })

        csv.createRow(**entry)

    csv.save()
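# A minimal illustration of the tracks_data structure write_to_file expects:
# a mapping from limb key to an ordered list of {'track': <track>} entries,
# where each track exposes fingerprint, uid, positionValue, widthValue and
# lengthValue. The mock objects below are hypothetical stand-ins used only to
# exercise the function; they are not part of the source project.
from types import SimpleNamespace

def _mock_track(name):
    value = SimpleNamespace(value=0.25, uncertainty=0.01)
    point = SimpleNamespace(x=1.0, xUnc=0.05, y=2.0, yUnc=0.05)
    return SimpleNamespace(
        fingerprint=name, uid='uid-%s' % name,
        positionValue=point, widthValue=value, lengthValue=value)

trackway = SimpleNamespace(name='EXAMPLE-TRACKWAY')
tracks_data = {
    'lp': [{'track': _mock_track('LP-1')}],
    'rp': [{'track': _mock_track('RP-1')}],
    'lm': [],
    'rm': []}
# write_to_file(trackway, tracks_data) would then emit "EXAMPLE-TRACKWAY.csv"
# with one row per track index, blank-padding the limbs that have no entry.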