def makePlot(label, tracks):
    """Create and upload a two-panel Plotly figure of track dimensions.

    Left panel: scatter of length vs width per site. Right panel: box plots
    of the length/width aspect ratio per site. The figure is uploaded via
    plotly.plot and the embed URL is printed.

    :param label: Text used in the figure title and the remote filename.
    :param tracks: Tabular track data — presumably a pandas DataFrame with
        `length`, `width`, and `site` columns (boolean masking and `.copy()`
        are used) — TODO confirm against callers.
    """
    # Work on a copy so the caller's data is never mutated
    tracks = tracks.copy()

    # Axis bounds padded by 5 cm and rounded outward to the nearest 0.1
    xBounds = [
        NumericUtils.roundToOrder(tracks.length.min() - 0.05, -1, math.floor),
        NumericUtils.roundToOrder(tracks.length.max() + 0.05, -1, math.ceil)]
    yBounds = [
        NumericUtils.roundToOrder(tracks.width.min() - 0.05, -1, math.floor),
        NumericUtils.roundToOrder(tracks.width.max() + 0.05, -1, math.ceil)]

    fig = plotlyTools.make_subplots(
        rows=1, cols=2,
        subplot_titles=('Length vs Width','Aspect Ratios'),
        print_grid=False)
    traces = []

    # One scatter trace and one box trace per tracksite, sharing the
    # site's configured color
    for site in tracks.site.unique():
        color = PlotConfigs.SITE_SPECS[site]['color']
        siteSlice = tracks[tracks.site == site]
        traces.append(plotlyGraph.Scatter(
            name=site,
            mode='markers',
            xaxis='x1', yaxis='y1',
            marker=plotlyGraph.Marker(color=color),
            x=siteSlice.length,
            y=siteSlice.width))
        traces.append(plotlyGraph.Box(
            name=site,
            y=siteSlice.length/siteSlice.width,
            marker=plotlyGraph.Marker(color=color),
            xaxis='x2', yaxis='y2'))

    fig['data'] += plotlyGraph.Data(traces)

    # Fix the scatter panel's axes to the padded bounds computed above
    fig['layout'].update(
        title='%s Length & Width by Tracksite' % label,
        xaxis1=plotlyGraph.XAxis(
            title='Length (m)',
            range=xBounds,
            autorange=False),
        yaxis1=plotlyGraph.YAxis(
            title='Width (m)',
            range=yBounds,
            autorange=False))

    # Upload to the remote plotly account without opening a browser window
    url = plotly.plot(
        filename='A16/%s-Length-Width' % label,
        figure_or_data=fig,
        auto_open=False)
    print('PLOT[%s]:' % label, PlotlyUtils.toEmbedUrl(url))
def test_roundToOrder(self):
    """Verify roundToOrder at several orders of magnitude."""
    # (expected, value, order) triples. The order -2 cases document that
    # Python's round() uses banker's rounding: a trailing 5 rounds toward
    # the even digit (5 up when the preceding digit is odd, down when even).
    cases = (
        (123.3, 123.345, -1),
        (123.34, 123.345, -2),
        (123.36, 123.355, -2),
        (123, 123.345, 0),
        (120, 123.345, 1),
        (100, 123.345, 2))
    for expected, value, order in cases:
        self.assertAlmostEqual(expected, NumericUtils.roundToOrder(value, order))
def _analyzeTrackSeries(self, series, trackway, sitemap):
    """Write one CSV row per stride comparing local rotation to the field
    measurement.

    For each consecutive pair of tracks in the series, the stride line from
    the previous track to the current one defines a reference direction; the
    previous track's rotation is expressed relative to that direction and
    compared against its field-measured rotation (when available).
    """
    # A stride requires at least two tracks
    if len(series.tracks) < 2:
        return

    prev_track = series.tracks[0]
    for track in series.tracks[1:]:
        stride_line = LineSegment2D(
            start=prev_track.positionValue,
            end=track.positionValue)
        stride_angle = stride_line.angle
        abs_angle = Angle(degrees=prev_track.rotation)

        # Difference order is flipped for left vs right tracks —
        # presumably to keep the sign convention consistent across sides
        # (TODO confirm against Angle.differenceBetween semantics)
        if not prev_track.left:
            local_angle = stride_angle.differenceBetween(abs_angle)
        else:
            local_angle = abs_angle.differenceBetween(stride_angle)

        has_field_measurements = not prev_track.hasImportFlag(
            ImportFlagsEnum.NO_FIELD_MEASUREMENTS)
        if has_field_measurements:
            measuredRotation = prev_track.rotationMeasured
            difference = round(abs(measuredRotation - local_angle.degrees))
            # Deviation expressed in multiples of the rotation uncertainty,
            # rounded to two decimal places
            deviation = NumericUtils.roundToOrder(
                value=difference/prev_track.rotationUncertainty,
                orderOfMagnitude=-2)
        else:
            # Empty strings leave the CSV cells blank for unmeasured tracks
            measuredRotation = ''
            difference = ''
            deviation = ''

        self._csv.createRow(
            uid=prev_track.uid,
            fingerprint=prev_track.fingerprint,
            difference=difference,
            deviation=deviation,
            localRotation=round(local_angle.degrees),
            measuredRotation=measuredRotation)
        prev_track = track
def value(self):
    """Return the raw value rounded to match the precision of its
    uncertainty (the order of the uncertainty's least significant figure).
    """
    sig_fig_order = NumericUtils.orderOfLeastSigFig(self.uncertainty)
    return NumericUtils.roundToOrder(self._raw, sig_fig_order)
def _process(self):
    """Analyze fractional stride errors: log summary statistics, produce
    linear/log histograms (signed and absolute), write per-track deviation
    rows to CSV, and warn if the high-deviation rate exceeds the normal
    2-sigma expectation.
    """
    # Collect the fractional errors from entries that have them
    errors = []
    for entry in self.entries:
        if 'fractional' in entry:
            errors.append(entry['fractional'])

    res = NumericUtils.getMeanAndDeviation(errors)
    self.logger.write('Fractional Stride Error %s' % res.label)

    label = 'Fractional Stride Errors'
    # Signed-error histograms: linear and log-scaled
    self._paths.append(self._makePlot(
        label=label,
        data=errors,
        histRange=(-1.0, 1.0)))
    self._paths.append(self._makePlot(
        label=label,
        data=errors,
        isLog=True,
        histRange=(-1.0, 1.0)))

    # Absolute-error histograms: linear and log-scaled
    # noinspection PyUnresolvedReferences
    d = np.absolute(np.array(errors))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        histRange=(0.0, 1.0)))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        isLog=True,
        histRange=(0.0, 1.0)))

    highDeviationCount = 0
    for entry in self.entries:
        if 'measured' not in entry:
            # Skip tracks that have no measured stride value for comparison
            continue

        # Deviations beyond 2 sigma count as significant
        if entry['deviation'] > 2.0:
            highDeviationCount += 1

        track = entry['track']
        # Delta reported as a percentage, to 3 significant figures
        delta = NumericUtils.roundToSigFigs(100.0*abs(entry['delta']), 3)
        self._csv.addRow({
            'fingerprint':track.fingerprint,
            'uid':track.uid,
            'measured':entry['measured'].label,
            'entered':entry['entered'].label,
            'dev':entry['deviation'],
            'delta':delta})

    if not self._csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._csv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(highDeviationCount)/float(len(self.entries)), -2)
    self.logger.write('%s significant %s (%s%%)' % (
        highDeviationCount, label.lower(), percentage))

    # 95.45% of a normal distribution lies within 2 sigma; more outliers
    # than that suggests a systematic problem
    if percentage > (100.0 - 95.45):
        self.logger.write(
            '[WARNING]: Large deviation count exceeds normal ' +
            'distribution expectations.')
def _process(self, label, widthKey, lengthKey, trackDeviations, absoluteOnly=False):
    """Analyze width/length deviation distributions for the given entry keys.

    Produces 1D histograms (signed and absolute) for widths and lengths, a
    2D width-vs-length histogram for entries that have both keys, and a CSV
    of tracks whose deviation exceeds 2 sigma in either dimension.

    :param label: Text used in plot titles and output filenames.
    :param widthKey: Entry-dict key holding the width deviation value.
    :param lengthKey: Entry-dict key holding the length deviation value.
    :param trackDeviations: Optional dict populated with per-track-uid
        sigma data for significant tracks (skipped when None).
    :param absoluteOnly: When True, skip the signed-value histograms.
    """
    pl = self.plot
    ws = []
    ls = []
    w2D = []
    l2D = []
    for entry in self.entries:
        if widthKey in entry:
            ws.append(entry[widthKey])
            if lengthKey in entry:
                w2D.append(entry[widthKey])
        if lengthKey in entry:
            ls.append(entry[lengthKey])
            if widthKey in entry:
                l2D.append(entry[lengthKey])

    plotList = [
        ('widths', ws, 'Width', 'b'),
        ('lengths', ls, 'Length', 'r')]

    wRes = NumericUtils.getMeanAndDeviation(ws)
    self.logger.write('Width %ss' % wRes.label)
    lRes = NumericUtils.getMeanAndDeviation(ls)
    self.logger.write('Length %ss' % lRes.label)

    for data in plotList:
        if not absoluteOnly:
            # Signed-value histograms: linear and log-scaled
            d = data[1]
            self._paths.append(self._makePlot(
                label, d, data, histRange=(-1.0, 1.0)))
            self._paths.append(self._makePlot(
                label, d, data, isLog=True, histRange=(-1.0, 1.0)))

        # Absolute-value histograms: linear and log-scaled
        # noinspection PyUnresolvedReferences
        d = np.absolute(np.array(data[1]))
        self._paths.append(self._makePlot(
            'Absolute ' + label, d, data, histRange=(0.0, 1.0)))
        self._paths.append(self._makePlot(
            'Absolute ' + label, d, data, isLog=True, histRange=(0.0, 1.0)))

    # 2D distribution over entries that carry both width and length values
    self.owner.createFigure('twoD')
    pl.hist2d(w2D, l2D, bins=20, range=([-1, 1], [-1, 1]))
    pl.title('2D %s Distribution' % label)
    pl.xlabel('Width %s' % label)
    pl.ylabel('Length %s' % label)
    pl.xlim(-1.0, 1.0)
    pl.ylim(-1.0, 1.0)
    path = self.getTempPath(
        '%s.pdf' % StringUtils.getRandomString(16), isFile=True)
    self.owner.saveFigure('twoD', path)
    self._paths.append(path)

    csv = CsvWriter()
    csv.path = self.getPath(
        '%s-Deviations.csv' % label.replace(' ', '-'), isFile=True)
    csv.addFields(
        ('uid', 'UID'),
        ('fingerprint', 'Fingerprint'),
        ('wSigma', 'Width Deviation'),
        ('lSigma', 'Length Deviation'))

    count = 0
    for entry in self.entries:
        # Deviations in multiples of sigma, rounded to two decimal places.
        # BUGFIX: the length value was previously rounded to order -1,
        # inconsistent with the width value and with the sibling analyzers,
        # which all round deviations to -2.
        widthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(widthKey, 0.0)/wRes.uncertainty), -2)
        lengthDevSigma = NumericUtils.roundToOrder(
            abs(entry.get(lengthKey, 0.0)/lRes.uncertainty), -2)
        if widthDevSigma > 2.0 or lengthDevSigma > 2.0:
            count += 1
            track = entry['track']
            data = dict(
                wSigma=widthDevSigma,
                lSigma=lengthDevSigma)
            if trackDeviations is not None:
                trackDeviations[track.uid] = data
            csv.createRow(
                uid=track.uid,
                fingerprint=track.fingerprint,
                **data)

    if not csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file to %s' % csv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(count)/float(len(self.entries)), -2)
    self.logger.write('%s significant %ss (%s%%)' % (
        count, label.lower(), percentage))

    # 95.45% of a normal distribution lies within 2 sigma
    if percentage > (100.0 - 95.45):
        # BUGFIX: the concatenated message previously read "exceedsnormal"
        # (missing separator space between the two string fragments)
        self.logger.write(
            '[WARNING]: Large deviation count exceeds ' +
            'normal distribution expectations.')
def _analyzeSitemap(self, sitemap):
    """Compile completeness statistics for one sitemap.

    Phase 1 scans every track in the sitemap for hidden, orphaned, or solo
    tracks and records them in the orphan/solo CSVs. Phase 2 tallies
    per-trackway counts and completion percentages. Finally any tracks not
    touched by either phase are logged as unprocessed and a summary row is
    written to the sitemap CSV.
    """
    smCount = 0
    smInCompCount = 0
    ignores = 0

    #-------------------------------------------------------------------------------------------
    # SITE MAP TRACKS
    #       Iterate through all the tracks within a sitemap and look for hidden or orphaned
    #       tracks to account for any that may not be loaded by standard means. Any tracks
    #       found this way are removed from the all list created above, which specifies that
    #       they were found by other means.
    tracks = sitemap.getAllTracks()
    trackways = self.owner.getTrackways(sitemap)
    processed = []

    for t in tracks:
        # Seeing the track here accounts for it in the global registry
        if t.uid in self._allTracks:
            del self._allTracks[t.uid]

        # A track that points at itself would make series traversal loop forever
        if t.next and t.next == t.uid:
            self.logger.write([
                '[ERROR]: Circular track reference (track.uid == track.next)',
                'TRACK: %s (%s)' % (t.fingerprint, t.uid)])

        # Visible tracks with a successor are part of a normal series
        if not t.hidden and t.next:
            continue

        prev = t.getPreviousTrack()
        # Visible tracks with a predecessor are also connected normally
        if prev and not t.hidden:
            continue
        elif not t.hidden:
            # Check for solo tracks, i.e. tracks that are the only track in their series and
            # would appear to be orphaned even though they are in a series because the series
            # itself has no connections
            soloTrack = False
            for tw in trackways:
                if t.uid in tw.firstTracksList:
                    soloTrack = True
                    break

            if soloTrack:
                self._soloTrackCsv.createRow(
                    uid=t.uid,
                    fingerprint=t.fingerprint)
                continue

        # Remaining tracks (hidden, or visible but unconnected) are ignored
        # from completeness totals and logged to the orphan CSV
        self.ignoredCount += 1
        ignores += 1

        isOrphaned = not t.next and not prev
        self._orphanCsv.createRow(
            fingerprint=t.fingerprint,
            orphan='YES' if isOrphaned else 'NO',
            hidden='YES' if t.hidden else 'NO',
            uid=t.uid,
            sitemap=sitemap.filename)
        processed.append(t)

    #-------------------------------------------------------------------------------------------
    # TRACKWAYS
    #       Iterate over the trackways within the current site
    for tw in self.owner.getTrackways(sitemap):
        series = dict()
        twCount = 0
        twIncomplete = 0
        isReady = True

        try:
            bundle = self.owner.getSeriesBundle(tw)
        except Exception:
            self.logger.write(
                '[ERROR]: Invalid trackway series in %s. Skipping status check.'
                % tw.name)
            continue

        for s in bundle.asList():
            # A trackway is only "ready" if every series within it is ready
            isReady = isReady and s.isReady
            twCount += s.count
            twIncomplete += len(s.incompleteTracks)

            for t in s.tracks:
                if t not in processed:
                    processed.append(t)

            # Suffix markers: '*' = invalid series, '...' = incomplete series
            suffix = ''
            if not s.isValid:
                suffix += '*'
            if not s.isComplete:
                suffix += '...'
            series[s.trackwayKey] = '%s%s' % (int(s.count), suffix)

        # Completion percentage to two decimal places
        completion = NumericUtils.roundToOrder(
            100.0*float(twCount - twIncomplete)/float(twCount), -2)
        self._trackwayCsv.createRow(
            name=tw.name,
            incomplete=twIncomplete,
            total=twCount,
            ready='YES' if isReady else 'NO',
            complete=completion,
            **series)

        self.count += twCount
        self.incompleteCount += twIncomplete
        smCount += twCount
        smInCompCount += twIncomplete

    # Any tracks neither ignored nor part of a trackway series are logged
    # as unprocessed for follow-up
    smUnprocessed = 0
    if len(processed) != len(tracks):
        for pt in processed:
            tracks.remove(pt)
        smUnprocessed = len(tracks)

        for t in tracks:
            pt = t.getPreviousTrack()
            nt = t.getNextTrack()
            self._unprocessedCsv.createRow(
                uid=t.uid,
                fingerprint=t.fingerprint,
                next=nt.uid if nt else 'NONE',
                previous=pt.uid if pt else 'NONE')

    # Guard against division by zero for sitemaps with no counted tracks
    if smCount == 0:
        completion = 0
    else:
        completion = NumericUtils.roundToOrder(
            100.0*float(smCount - smInCompCount)/float(smCount), -2)

    self._sitemapCsv.createRow(
        name=sitemap.filename,
        count=smCount,
        ignores=ignores,
        incomplete=smInCompCount,
        completion=completion,
        unprocessed=smUnprocessed)
def _postAnalyze(self):
    """Summarize rotational uncertainties after per-track analysis.

    Saves linear and log histograms of the collected uncertainties, then
    walks the tracks (grouped by sitemap) drawing low/high-uncertainty
    markers into per-site SVG overlays and recording tracks whose
    uncertainty exceeds twice the average in the large-uncertainty CSV.
    """
    h = Histogram(
        data=self._uncs,
        binCount=80,
        xLimits=(0, max(*self._uncs)),
        color="r",
        title="Distribution of Rotational Uncertainties",
        xLabel="Uncertainty Value (degrees)",
        yLabel="Frequency",
    )
    p1 = h.save(self.getTempFilePath(extension="pdf"))
    # Second copy of the same histogram with a log-scaled axis
    h.isLog = True
    h.title += " (log)"
    p2 = h.save(self.getTempFilePath(extension="pdf"))
    self.mergePdfs([p1, p2], self.getPath("Rotational-Uncertainty-Distribution.pdf"))

    average = NumericUtils.getMeanAndDeviation(self._uncs)
    self.logger.write("Average rotational uncertainty: %s" % average.label)

    # -------------------------------------------------------------------------------------------
    # FIND LARGE UNCERTAINTY TRACKS
    largeUncertaintyCount = 0
    drawing = None
    sitemap = None

    # If track uncertainty is 2x average, add that track to the spreadsheet and map overlay.
    # NOTE(review): this assumes self._tracks is ordered so that tracks from the same
    # sitemap are contiguous — otherwise a drawing would be reopened/overwritten.
    for t in self._tracks:

        # if the tracksite has changed, save previous map and make a new one
        if sitemap != t.trackSeries.trackway.sitemap:
            # save the last site map drawing (if there was one)
            if drawing:
                drawing.save()

            # then start a new drawing for this new site map
            sitemap = t.trackSeries.trackway.sitemap

            fileName = "%s-%s-ROTATION_UNC.svg" % (sitemap.name, sitemap.level)
            path = self.getPath(self.DRAWING_FOLDER_NAME, fileName, isFile=True)
            drawing = CadenceDrawing(path, sitemap)

            # create a group to be instanced for the spreadsheet values
            drawing.createGroup("rect1")
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId="rect1")

            # create another group to be instanced for the mapped values
            drawing.createGroup("rect2")
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId="rect2")

            # and place a grid and the federal coordinates in the drawing file
            drawing.grid()
            drawing.federalCoordinates()

        # now examine the rotational uncertainty for this track
        rotation = t.rotationAngle.valueDegrees
        if rotation.uncertainty <= 2.0 * average.uncertainty:
            # then just indicate that this track has low uncertainty
            # (per-track text labeling via drawing.text was disabled here)
            self._drawLowUncertaintyMarker(drawing, t)
            continue

        # else, since the uncertainty is high, first write that track in the spreadsheet
        largeUncertaintyCount += 1
        self._largeUncCsv.createRow(uid=t.uid, fingerprint=t.fingerprint, r=rotation.label)

        # if the measured rotation is 0, mark with a yellow disk with red outline
        if t.rotationMeasured == 0:
            drawing.circle(
                (t.x, t.z),
                100 * (t.widthUncertainty + t.lengthUncertainty) / 2.0,
                scene=True,
                fill="yellow",
                stroke="red",
            )
            continue

        # otherwise mark as a high-uncertainty track
        # (per-track text labeling via drawing.text was disabled here)
        self._drawHighUncertaintyMarker(drawing, t)

    # and close off with a final save of the drawing file
    if drawing:
        drawing.save()

    self.logger.write(
        "%s Tracks with large rotational uncertainties found (%s%%)"
        % (
            largeUncertaintyCount,
            NumericUtils.roundToOrder(100.0 * float(largeUncertaintyCount) / float(len(self._tracks)), -1),
        )
    )

    self._largeUncCsv.save()
    # Release the collected tracks now that analysis is complete
    self._tracks = []
def _postAnalyze(self):
    """Summarize spatial (X, Z) uncertainties after per-track analysis.

    Saves linear and log histograms of the collected uncertainties, then
    walks the tracks (grouped by sitemap) drawing labeled low/high
    uncertainty markers into per-site SVG overlays and recording tracks
    whose uncertainty exceeds twice the average in the large-uncertainty
    CSV.
    """
    h = Histogram(
        data=self._uncs,
        binCount=80,
        xLimits=(0, max(*self._uncs)),
        color='r',
        title='Distribution of Spatial (X, Z) Uncertainties',
        xLabel='Uncertainty Value (m)',
        yLabel='Frequency')
    p1 = h.save(self.getTempFilePath(extension='pdf'))
    # Second copy of the same histogram with a log-scaled axis
    h.isLog = True
    h.title += ' (log)'
    p2 = h.save(self.getTempFilePath(extension='pdf'))
    self.mergePdfs([p1, p2], self.getPath('Spatial-Uncertainty-Distribution.pdf'))

    average = NumericUtils.getMeanAndDeviation(self._uncs)
    self.logger.write('Average spatial uncertainty: %s' % average.label)

    #-------------------------------------------------------------------------------------------
    # FIND LARGE UNCERTAINTY TRACKS
    largeUncertaintyCount = 0
    drawing = None
    sitemap = None

    # If track uncertainty is 2x average, add that track to the spreadsheet and map overlay.
    # NOTE(review): this assumes self._tracks is ordered so that tracks from the same
    # sitemap are contiguous — otherwise a drawing would be reopened/overwritten.
    for t in self._tracks:

        # if the tracksite has changed, save previous map and make a new one
        if sitemap != t.trackSeries.trackway.sitemap:
            # save the last site map drawing (if there was one)
            if drawing:
                drawing.save()

            # then start a new drawing for this new site map
            sitemap = t.trackSeries.trackway.sitemap

            fileName = sitemap.name + "_" + sitemap.level + '_uncertainty.svg'
            path = self.getPath(self.DRAWING_FOLDER_NAME, fileName, isFile=True)
            drawing = CadenceDrawing(path, sitemap)

            # create a group to be instanced for the spreadsheet values
            drawing.createGroup('rect1')
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId='rect1')

            # create another group to be instanced for the mapped values
            drawing.createGroup('rect2')
            # create a rectangle of 100x100 cm that is to be scaled by fractional meters
            drawing.rect((0, 0), 100, 100, scene=True, groupId='rect2')

            # and place a grid and the federal coordinates in the drawing file
            drawing.grid()
            drawing.federalCoordinates()

        # now examine the positional uncertainties for this track
        x = t.xValue
        z = t.zValue

        # Console dump of tracks beyond a 15 cm uncertainty in either axis
        # (diagnostic output; retained from earlier debugging)
        if x.uncertainty > 0.15 or z.uncertainty > 0.15:
            print('%s\t%s' % (t.uid, t.fingerprint))

        if max(x.uncertainty, z.uncertainty) <= 2.0*average.uncertainty:
            # then just indicate that this track has low uncertainty
            self._drawLowUncertaintyMarker(drawing, t)

            # label this track with green
            drawing.text(
                t.name,
                (t.x - 20, t.z),
                scene=True,
                stroke='green',
                stroke_width='0.25',
                font_size='8px',
                font_family='Arial')
            continue

        # else, since the uncertainty is high, first write that track in the spreadsheet
        largeUncertaintyCount += 1
        self._largeUncCsv.createRow(
            uid=t.uid,
            fingerprint=t.fingerprint,
            x=x.label,
            z=z.label)

        # if either the measured width or length is 0, mark with a yellow disk with red outline
        if t.widthMeasured == 0 or t.lengthMeasured == 0:
            drawing.circle(
                (t.x, t.z),
                100*(t.widthUncertainty + t.lengthUncertainty)/2.0,
                scene=True,
                fill='yellow',
                stroke='red')

            drawing.text(
                t.name,
                (t.x - 20, t.z),
                scene=True,
                stroke='black',
                stroke_width='0.25',
                font_size='6px',
                font_family='Arial')
            continue

        self._drawHighUncertaintyMarker(drawing, t)

        # label this track with red
        drawing.text(
            t.name,
            (t.x - 20, t.z),
            scene=True,
            stroke='red',
            stroke_width='0.25',
            font_size='6px',
            font_family='Arial')

        # (disabled: scaled rect1/rect2 instancing of the measured and map
        # dimensions for high-uncertainty tracks was previously drawn here)

    # and close off with a final save of the drawing file
    if drawing:
        drawing.save()

    self.logger.write('%s Tracks with large spatial uncertainties found (%s%%)' % (
        largeUncertaintyCount,
        NumericUtils.roundToOrder(
            100.0*float(largeUncertaintyCount)/float(len(self._tracks)), -1)))

    self._largeUncCsv.save()
    # Release the collected tracks now that analysis is complete
    self._tracks = []
def _process(self):
    """Analyze fractional pace errors: log summary statistics, produce
    linear/log histograms (signed and absolute), color the map markers by
    deviation severity, write per-track rows to CSV, save sitemap drawings,
    and warn if the high-deviation rate exceeds the normal 2-sigma
    expectation.
    """
    # Collect the fractional errors from entries that have them
    errors = []
    for entry in self.entries:
        if 'fractional' in entry:
            errors.append(entry['fractional'])

    res = NumericUtils.getMeanAndDeviation(errors)
    self.logger.write('Fractional Pace Error %s' % res.label)

    label = 'Fractional Pace Errors'
    d = errors
    # Signed-error histograms: linear and log-scaled
    self._paths.append(self._makePlot(
        label=label,
        data=d,
        histRange=(-1.0, 1.0)))
    self._paths.append(self._makePlot(
        label=label,
        data=d,
        isLog=True,
        histRange=(-1.0, 1.0)))

    # Absolute-error histograms: linear and log-scaled
    # noinspection PyUnresolvedReferences
    d = np.absolute(np.array(d))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        histRange=(0.0, 1.0)))
    self._paths.append(self._makePlot(
        label='Absolute %s' % label,
        data=d,
        isLog=True,
        histRange=(0.0, 1.0)))

    highDeviationCount = 0
    for entry in self.entries:
        if 'measured' not in entry:
            # Tracks without a measured pace are skipped (previously marked
            # purple on the map):
            # entry['drawFunc']('purple')
            continue

        # Deviations beyond 2 sigma count as significant and are drawn red
        if entry['deviation'] > 2.0:
            entry['drawFunc']('red')
            highDeviationCount += 1
        else:
            entry['drawFunc'](
                'black' if abs(entry['deviation']) < 2.0 else '#FFAAAA')

        track = entry['track']
        # Delta reported as a percentage, to 3 significant figures
        delta = NumericUtils.roundToSigFigs(100.0*abs(entry['delta']), 3)

        pairTrack = entry.get('pairTrack')
        if pairTrack:
            pairedFingerprint = pairTrack.fingerprint
            pairedUid = pairTrack.uid
        else:
            # Blank cells for tracks with no pairing
            pairedFingerprint = ''
            pairedUid = ''

        self._csv.addRow({
            'fingerprint':track.fingerprint,
            'uid':track.uid,
            'measured':entry['measured'].label,
            'entered':entry['entered'].label,
            'dev':entry['deviation'],
            'delta':delta,
            'pairedUid':pairedUid,
            'pairedFingerprint':pairedFingerprint})

    for sitemap in self.owner.getSitemaps():
        # Remove drawing from the sitemap cache and save the drawing file
        try:
            sitemap.cache.extract('drawing').save()
        except Exception:
            self.logger.write('[WARNING]: No sitemap saved for %s-%s' % (
                sitemap.name, sitemap.level))

    if not self._csv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._csv.path)

    if not self._errorCsv.save():
        self.logger.write(
            '[ERROR]: Failed to save CSV file %s' % self._errorCsv.path)

    percentage = NumericUtils.roundToOrder(
        100.0*float(highDeviationCount)/float(len(self.entries)), -2)
    self.logger.write('%s significant %s (%s%%)' % (
        highDeviationCount, label.lower(), percentage))

    # 95.45% of a normal distribution lies within 2 sigma; more outliers
    # than that suggests a systematic problem
    if percentage > (100.0 - 95.45):
        self.logger.write(
            '[WARNING]: Large deviation count exceeds normal ' +
            'distribution expectations.')
def _analyzeTrackSeries(self, series, trackway, sitemap):
    """Compare each track's map-derived rotation against its field-measured
    rotation.

    For every visible track with a valid stride line, the field angle is
    transformed into the map's reference frame, both angles are normalized
    to [-180, 180], and their difference (with propagated uncertainty) is
    written to CSV, accumulated for later statistics, and visualized on the
    current sitemap drawing as three pointers (stride axis, map estimate,
    field estimate) plus a disk whose radius scales with the difference.
    """
    if len(series.tracks) < 2:
        # At least two tracks are required to make the comparison
        return

    for track in series.tracks:
        # Missing field measurement defaults to 0 degrees
        fieldAngle = Angle(
            degrees=track.rotationMeasured \
            if track.rotationMeasured \
            else 0.0)
        dataAngle = Angle(degrees=track.rotation)
        strideLine = StrideLine(track=track, series=series)

        if track.hidden or strideLine.pairTrack.hidden:
            continue

        try:
            strideLine.vector.normalize()
        except ZeroDivisionError:
            # Coincident track positions make the stride direction undefined;
            # log both tracks and skip
            pair = strideLine.pairTrack
            self.logger.write([
                '[ERROR]: Stride line was a zero length vector',
                'TRACK: %s (%s, %s) [%s]' % (
                    track.fingerprint,
                    NumericUtils.roundToSigFigs(track.x, 3),
                    NumericUtils.roundToSigFigs(track.z, 3),
                    track.uid),
                'PAIRING: %s (%s, %s) [%s]' % (
                    pair.fingerprint,
                    NumericUtils.roundToSigFigs(pair.x, 3),
                    NumericUtils.roundToSigFigs(pair.z, 3),
                    pair.uid)])
            continue

        # Transform the field angle into the map frame; the sign convention
        # differs for left vs right tracks
        axisAngle = strideLine.angle
        if track.left:
            fieldAngle.radians += axisAngle.radians
        else:
            fieldAngle.radians = axisAngle.radians - fieldAngle.radians

        # Adjust field angle into range [-180, 180]
        fieldAngle.constrainToRevolution()
        if fieldAngle.degrees > 180.0:
            fieldAngle.degrees -= 360.0

        # Base field uncertainty of 5 degrees plus a term that grows as the
        # stride line approaches the x axis (see the sqrt denominator)
        fieldAngleUnc = Angle(degrees=5.0)
        fieldAngleUnc.radians += \
            0.03/math.sqrt(1.0 - math.pow(strideLine.vector.x, 2))

        fieldDeg = NumericUtils.toValueUncertainty(
            value=fieldAngle.degrees,
            uncertainty=fieldAngleUnc.degrees)

        # Adjust data angle into range [-180, 180]
        dataAngle.constrainToRevolution()
        if dataAngle.degrees > 180.0:
            dataAngle.degrees -= 360.0

        dataAngleUnc = Angle(degrees=track.rotationUncertainty)
        dataDeg = NumericUtils.toValueUncertainty(
            value=dataAngle.degrees,
            uncertainty=dataAngleUnc.degrees)

        angle1 = Angle(degrees=dataDeg.value)
        angle2 = Angle(degrees=fieldDeg.value)

        # fill color for the disks to be added to the map are based on diffDeg;
        # uncertainty is the quadrature sum of both sources, capped at 90 degrees
        diffDeg = NumericUtils.toValueUncertainty(
            value=angle1.differenceBetween(angle2).degrees,
            uncertainty=min(90.0, math.sqrt(
                math.pow(dataAngleUnc.degrees, 2) +
                math.pow(fieldAngleUnc.degrees, 2))))
        self._diffs.append(diffDeg.value)

        deviation = diffDeg.value/diffDeg.uncertainty
        self.deviations[track.uid] = diffDeg

        # for now, convert +/- 180 headings to 0-360, using e and m
        # comment the next four lines toggle comments for entered and
        # measured below to revert
        e = dataDeg.value
        m = fieldDeg.value
        if e < 0.0:
            e += 360.0
        if m < 0.0:
            m += 360.0

        data = dict(
            uid=track.uid,
            fingerprint=track.fingerprint,
            entered=str(e),
            measured=str(m),
            delta=abs(diffDeg.value),
            deviation=deviation,
            relative=NumericUtils.roundToOrder(track.rotationMeasured, -2),
            axis=NumericUtils.roundToOrder(axisAngle.degrees, -2),
            axisPairing='NEXT' if strideLine.isNext else 'PREV')
        self._csv.createRow(**data)

        data['track'] = track
        self._data.append(data)

        # draw the stride line pointer for reference in green
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=axisAngle.degrees,
            stroke_width=1,
            scale=0.5,
            stroke='green')

        # indicate in blue the map-derived estimate of track rotation
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=dataDeg.value,
            stroke_width=1,
            stroke='blue')

        # add the measured (spreadsheet) estimate of rotation
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=fieldDeg.value,
            stroke_width=1,
            stroke='red')

        # place a translucent disk of radius proportional to the difference
        # in degrees
        radius = 100.0*diffDeg.value/180.0
        self._currentDrawing.circle(
            (track.x, track.z), radius,
            scene=True,
            fill='red',
            stroke_width=0.5,
            stroke='red',
            fill_opacity='0.5')