def echo(self, asciiLabel=False):
    """Return this point as a '(x, y)' string built from the raw
    value-with-uncertainty labels of each coordinate."""
    xPart = NumericUtils.toValueUncertainty(
        self.x, self.xUnc, asciiLabel=asciiLabel)
    yPart = NumericUtils.toValueUncertainty(
        self.y, self.yUnc, asciiLabel=asciiLabel)
    return '(%s, %s)' % (xPart.rawLabel, yPart.rawLabel)
def _calculateDeviation(
        self, track, value, uncertainty, highMeasuredUncertainty,
        measured, prefix, label
):
    """Compare a map-derived value against its field measurement.

    Returns a dict with the fractional deviation ('<prefix>Dev') and the
    sigma deviation ('<prefix>Delta'), or None when no field measurement
    exists. Logs and re-raises ZeroDivisionError when the combined
    uncertainty is zero."""
    if not measured:
        return None

    # Field measurements carry 6% relative uncertainty, or 12% when the
    # track was flagged as having high measurement uncertainty
    relative = 0.12 if highMeasuredUncertainty else 0.06
    entered = NumericUtils.toValueUncertainty(value, uncertainty)
    field = NumericUtils.toValueUncertainty(measured, measured*relative)

    combinedUnc = math.sqrt(entered.uncertainty**2 + field.uncertainty**2)
    deviation = entered.value - field.value

    result = {'%sDev' % prefix: deviation/measured}
    try:
        result['%sDelta' % prefix] = abs(deviation)/combinedUnc
    except ZeroDivisionError:
        self.logger.write([
            '[ERROR]: Track without %s uncertainty' % label,
            'TRACK: %s (%s)' % (track.fingerprint, track.uid) ])
        raise
    return result
def __unicode__(self):
    """Return a readable representation: the class name followed by the
    (x, y) value-with-uncertainty labels."""
    # sys.version is a free-form string; lexicographic comparison against
    # '3' misclassifies hypothetical versions like '10.x'. version_info is
    # the robust way to detect Python 2 (ASCII-only labels there).
    isPy2 = sys.version_info[0] < 3
    return '<%s (%s, %s)>' % (
        self.__class__.__name__,
        NumericUtils.toValueUncertainty(self.x, self.xUnc, asciiLabel=isPy2).label,
        NumericUtils.toValueUncertainty(self.y, self.yUnc, asciiLabel=isPy2).label)
def __unicode__(self):
    """Return a readable representation: the class name followed by the
    (x, y) value-with-uncertainty labels."""
    # Use version_info instead of comparing the sys.version string, which
    # breaks lexicographically for versions like '10.x'. Python 2 gets
    # ASCII-only labels.
    isPy2 = sys.version_info[0] < 3
    return '<%s (%s, %s)>' % (
        self.__class__.__name__,
        NumericUtils.toValueUncertainty(
            self.x, self.xUnc, asciiLabel=isPy2).label,
        NumericUtils.toValueUncertainty(
            self.y, self.yUnc, asciiLabel=isPy2).label)
def test_weightedAverage(self):
    """Weighted average of three value/uncertainty pairs matches the
    hand-computed mean and uncertainty."""
    inputs = [(11.0, 1.0), (12.0, 1.0), (10.0, 3.0)]
    values = [NumericUtils.toValueUncertainty(v, u) for v, u in inputs]

    result = NumericUtils.weightedAverage(*values)
    self.assertEqual(result.value, 11.4, 'Value Match')
    self.assertEqual(result.uncertainty, 0.7, 'Value Match')
def test_weightedAverage(self):
    """Verify weightedAverage over three measurements produces the expected
    combined value and uncertainty."""
    result = NumericUtils.weightedAverage(
        NumericUtils.toValueUncertainty(11.0, 1.0),
        NumericUtils.toValueUncertainty(12.0, 1.0),
        NumericUtils.toValueUncertainty(10.0, 3.0))

    self.assertEqual(result.value, 11.4, 'Value Match')
    self.assertEqual(result.uncertainty, 0.7, 'Value Match')
def test_toValueUncertainty(self):
    """toValueUncertainty rounds value and uncertainty consistently at
    several magnitudes."""
    cases = (
        (math.pi, 0.00456, 3.142, 0.005),
        (100.0*math.pi, 42.0, 310.0, 40.0),
        (0.001*math.pi, 0.000975, 0.003, 0.001))

    for raw, rawUnc, expected, expectedUnc in cases:
        value = NumericUtils.toValueUncertainty(raw, rawUnc)
        self.assertEqual(
            value.value, expected,
            'Values do not match %s' % value.label)
        self.assertEqual(
            value.uncertainty, expectedUnc,
            'Uncertainties do not match %s' % value.label)
def _postAnalyze(self):
    """Finalize the rotation analysis: save the CSV, log the mean rotation
    difference, emit histograms of the differences (linear and log scale),
    plot rotation deviation against track aspect circularity, and merge all
    generated plots into a single PDF."""
    self._csv.save()

    meanDiff = NumericUtils.getMeanAndDeviation(self._diffs)
    self.logger.write('Rotation %s' % meanDiff.label)

    # Histogram of rotation differences over the full [-180, 180] range
    self._paths.append(self._makePlot(
        label='Rotation Differences',
        data=self._diffs,
        histRange=[-180, 180]))

    # Same histogram with a logarithmic count axis
    self._paths.append(self._makePlot(
        label='Rotation Differences',
        data=self._diffs,
        histRange=[-180, 180],
        isLog=True))

    circs = []      # circularity values: |aspect - 1|
    circsUnc = []   # uncertainties of the circularity values
    diffs = []      # absolute rotation deviations
    diffsUnc = []   # uncertainties of the rotation deviations

    entries = self.owner.getStage('lengthWidth').entries
    for entry in entries:
        track = entry['track']
        if track.uid not in self.deviations:
            # Skip those tracks with no deviation value (solo tracks)
            continue
        diffDeg = self.deviations[track.uid]
        diffs.append(abs(diffDeg.value))
        diffsUnc.append(diffDeg.uncertainty)

        # Compute the circularity of the track from its aspect ratio. If
        # the aspect is less than or equal to 1.0 use the aspect value
        # directly. However, if the value is greater than one, take the
        # reciprocal so that large and small aspect ratios can be compared
        # equally.
        aspect = entry['aspect']
        if aspect.value > 1.0:
            a = 1.0/aspect.raw
            aspect = NumericUtils.toValueUncertainty(
                a, a*(aspect.rawUncertainty/aspect.raw))

        circs.append(abs(aspect.value - 1.0))
        circsUnc.append(aspect.uncertainty)

    pl = self.plot
    self.owner.createFigure('circular')
    pl.errorbar(x=circs, y=diffs, xerr=circsUnc, yerr=diffsUnc, fmt='.')
    pl.xlabel('Aspect Circularity')
    pl.ylabel('Rotation Deviation')
    pl.title('Rotation Deviation and Aspect Circularity')
    self._paths.append(self.owner.saveFigure('circular'))

    self.mergePdfs(self._paths)
    self._paths = []
def test_toValueUncertainty(self):
    """Check rounding behavior of toValueUncertainty at several scales."""
    def check(value, expectedValue, expectedUnc):
        # Assert both rounded components of a single conversion result
        self.assertEqual(
            value.value, expectedValue,
            'Values do not match %s' % value.label)
        self.assertEqual(
            value.uncertainty, expectedUnc,
            'Uncertainties do not match %s' % value.label)

    check(NumericUtils.toValueUncertainty(math.pi, 0.00456), 3.142, 0.005)
    check(NumericUtils.toValueUncertainty(100.0*math.pi, 42.0), 310.0, 40.0)
    check(
        NumericUtils.toValueUncertainty(0.001*math.pi, 0.000975),
        0.003, 0.001)
def zValue(self):
    """ Returns the z value as an uncertainty named tuple in units of
        meters. The uncertainty combines the width, length, and rotation
        uncertainties by first-order error propagation. """
    toRadians = math.pi/180.0
    rotation = toRadians*float(self.rotation)
    rotationUnc = toRadians*float(self.rotationUncertainty)

    cosRot = math.cos(rotation)
    sinRot = math.sin(rotation)
    widthUnc = self.widthUncertainty
    lengthUnc = self.lengthUncertainty

    zUnc = (
        lengthUnc*abs(cosRot)
        + widthUnc*abs(sinRot)
        + rotationUnc*abs(widthUnc*cosRot - lengthUnc*sinRot))

    # The 0.01 factor converts the stored z value to meters (stored value
    # presumably in centimeters -- confirm against the data model)
    return NumericUtils.toValueUncertainty(0.01*float(self.z), zUnc)
def slope(self):
    """ Returns the slope of the line as a ValueUncertainty named tuple,
        or None for a vertical line (zero run), where the slope is
        undefined. """
    s = self.start
    e = self.end
    deltaX = e.x - s.x
    deltaY = e.y - s.y

    # Only the division can legitimately fail (vertical line). The
    # original broad `except Exception` also swallowed genuine errors
    # such as attribute typos; narrow the catch and the try body.
    try:
        slope = deltaY/deltaX
    except ZeroDivisionError:
        return None

    # First-order error propagation using the summed endpoint uncertainties
    unc = abs(1.0/deltaX)*(s.yUnc + e.yUnc) \
        + abs(slope/deltaX)*(s.xUnc + e.xUnc)
    return NumericUtils.toValueUncertainty(slope, unc)
def distanceTo(self, position):
    """Return the distance between this point and *position* as a
    ValueUncertainty, propagating both points' coordinate uncertainties.
    Identical points fall back to an uncertainty of 1.0."""
    dx = self.x - position.x
    dy = self.y - position.y
    distance = math.sqrt(dx*dx + dy*dy)

    # Use the absolute value because the derivatives in error propagation
    # are always absolute values
    dx = abs(dx)
    dy = abs(dy)
    try:
        error = (
            dx*(self.xUnc + position.xUnc)
            + dy*(self.yUnc + position.yUnc))/distance
    except ZeroDivisionError:
        error = 1.0

    return NumericUtils.toValueUncertainty(distance, error)
def distanceTo(self, position):
    """Distance from this point to *position* with first-order propagated
    uncertainty; a zero separation yields an uncertainty of 1.0."""
    # Work with absolute deltas throughout: squaring makes the sign
    # irrelevant for the distance, and the propagation derivatives are
    # absolute values anyway
    deltaX = abs(self.x - position.x)
    deltaY = abs(self.y - position.y)
    distance = math.sqrt(deltaX**2 + deltaY**2)

    numerator = (
        deltaX*(self.xUnc + position.xUnc) +
        deltaY*(self.yUnc + position.yUnc))
    try:
        error = numerator/distance
    except ZeroDivisionError:
        error = 1.0

    return NumericUtils.toValueUncertainty(distance, error)
def getDensityDistributionTrace(data, columnName, errorColumnName, **kwargs):
    """Build a plotly Scatter trace of the density distribution for a data
    column with per-row uncertainties. Returns (trace, distribution)."""
    # Span the x range three uncertainties beyond the extreme data values
    lows = data[columnName] - 3.0*data[errorColumnName]
    highs = data[columnName] + 3.0*data[errorColumnName]

    values = [
        NumericUtils.toValueUncertainty(
            value=row[columnName],
            uncertainty=row[errorColumnName])
        for _, row in data.iterrows()]

    dd = DensityDistribution(values=values)
    xValues = np.linspace(lows.min(), highs.max(), 200)
    yValues = dd.createDistribution(xValues)

    return plotlyGraph.Scatter(x=xValues, y=yValues, **kwargs), dd
def _calculateSparseness(cls, spacings, reference):
    """ Calculates the relative sparseness from the series spacings list
        and the reference spacing, returning the ratings sorted by value. """
    results = []
    refValue = reference.value

    for spacing in spacings:
        # For each entry in the tests, normalize that value to the most
        # complete (highest track count) series to create a relative
        # sparseness rating
        diff = spacing.value - refValue
        absDiff = abs(diff)
        sign = 0.0 if absDiff == 0.0 else diff/absDiff

        unc = (
            abs(spacing.uncertainty/refValue)
            + abs(refValue*sign - absDiff)/(refValue*refValue))

        results.append(NumericUtils.toValueUncertainty(
            value=100.0*absDiff/refValue,
            uncertainty=100.0*unc))

    return ListUtils.sortObjectList(results, 'value')
def _analyzeTrack(self, track, series, trackway, sitemap):
    """ Performs analysis on each track. A dictionary is created to be
        added to the entries list. That dictionary contains track, wDev
        (the fractional difference in width between that estimated from
        the map and that measured in the field), ldev (the corresponding
        fractional difference in length), and if either of those field
        measurements are missing, the corresponding counter is
        incremented. """
    data = dict(track=track)

    # Width deviation between map-derived and field-measured values
    widthResult = self._calculateDeviation(
        track=track,
        value=track.width,
        uncertainty=track.widthUncertainty,
        measured=track.widthMeasured,
        highMeasuredUncertainty=track.hasImportFlag(
            ImportFlagsEnum.HIGH_WIDTH_UNCERTAINTY),
        prefix='w',
        label='Width')
    if widthResult:
        data.update(widthResult)

    # Length deviation between map-derived and field-measured values
    lengthResult = self._calculateDeviation(
        track=track,
        value=track.length,
        uncertainty=track.lengthUncertainty,
        measured=track.lengthMeasured,
        highMeasuredUncertainty=track.hasImportFlag(
            ImportFlagsEnum.HIGH_LENGTH_UNCERTAINTY),
        prefix='l',
        label='Length')
    if lengthResult:
        data.update(lengthResult)

    # Aspect ratio (width/length) with relative errors added in quadrature
    aspect = track.width/track.length
    relWidthErr = track.widthUncertainty/track.width
    relLengthErr = track.lengthUncertainty/track.length
    aspectUnc = abs(aspect)*math.sqrt(
        relWidthErr*relWidthErr + relLengthErr*relLengthErr)
    data['aspect'] = NumericUtils.toValueUncertainty(aspect, aspectUnc)

    self.entries.append(data)
def _logUnresolvableTrack(self, track, sitemap, message):
    """Record a track whose pace could not be resolved: increments the
    ignored counter, writes an error to the logger and the error CSV, and
    marks the track with a translucent red circle on the sitemap drawing."""
    # Field-measured pace with the standard measurement uncertainty
    measured = NumericUtils.toValueUncertainty(
        value=track.snapshotData.get(SnapshotDataEnum.PACE),
        uncertainty=self.MEASURED_UNCERTAINTY)

    self.ignored += 1
    self.logger.write([
        '[ERROR]: %s' % message,
        'TRACK: %s [%s]' % (track.fingerprint, track.uid),
        'PACE[field]: %s' % measured.label ])

    self._errorCsv.addRow({
        'uid':track.uid,
        'fingerprint':track.fingerprint,
        'measured':measured.label })

    # Highlight the ignored track on the map
    sitemap.cache.get('drawing').circle(
        track.positionValue.toMayaTuple(), 10,
        stroke='none', fill='red', fill_opacity=0.5)
def _calculateAverageSpacing(cls, series):
    """ Determines the average spacing of the tracks in the track series
        for use as a comparative measure of sparseness to the other track
        series in the trackway. If the series is not ready or does not
        have a sufficient number of tracks, this method will return None.

        :param: series | TrackSeries
            The series on which to determine the average spacing.

        :return: ValueUncertainty
            A value uncertainty instance that represents the average
            spacing of the series, or None if it's the calculation is
            aborted. """
    if not series.isReady:
        # Skip trackways with invalid series
        return None

    tracks = series.tracks
    if not tracks or len(tracks) < 2:
        # Ignore series with less than two tracks
        return None

    totalLength = 0.0
    uncertainties = []
    # Walk consecutive track pairs and accumulate their separations
    for startTrack, endTrack in zip(tracks[:-1], tracks[1:]):
        segment = LineSegment2D(
            start=startTrack.positionValue,
            end=endTrack.positionValue)
        spacing = segment.length
        totalLength += spacing.value
        uncertainties.append(spacing.uncertainty)

    count = float(len(tracks))
    combinedUnc = NumericUtils.sqrtSumOfSquares(*uncertainties)
    return NumericUtils.toValueUncertainty(
        value=totalLength/count,
        uncertainty=combinedUnc/count)
def distanceToPoint(self, point):
    """ Calculates the smallest distance between the specified point and
        this line segment using the standard formulation as described in:
        http://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Line_defined_by_two_points

        Raises ValueError for a degenerate (zero-length) segment. Returns
        a ValueUncertainty combining the propagated uncertainties of the
        point and both segment endpoints. """
    length = self.length
    if not length:
        raise ValueError('Cannot calculate point distance. Invalid line segment.')

    s = self.start
    e = self.end

    deltaX = e.x - s.x
    deltaY = e.y - s.y

    if deltaX == 0.0:
        # Vertical line: distance is the horizontal offset
        distance = abs(s.x - point.x)
    elif deltaY == 0.0:
        # Horizontal line: distance is the vertical offset
        distance = abs(s.y - point.y)
    else:
        distance = abs(deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y)/length.raw

    # First-order error propagation of the distance formula. B is the
    # signed numerator of the distance expression, D its denominator.
    # NOTE(review): the AbsB*DPrime terms in the endpoint errors look
    # dimensionally inconsistent with the bBD terms -- confirm against the
    # original derivation.
    B = deltaY*point.x - deltaX*point.y - s.x*e.y + e.x*s.y
    AbsB = abs(B)
    D = math.sqrt(deltaX*deltaX + deltaY*deltaY)
    DPrime = 1.0/math.pow(deltaX*deltaX + deltaY*deltaY, 3.0/2.0)
    bBD = B/(AbsB*D)

    pointXErr = point.xUnc*abs(deltaY*B/(AbsB*D))
    pointYErr = point.yUnc*abs(deltaX*B/(AbsB*D))
    startXErr = s.xUnc*abs(AbsB*DPrime + bBD*(point.y - e.y))
    startYErr = s.yUnc*abs(AbsB*DPrime + bBD*(e.x - point.x))
    endXErr = e.xUnc*abs(bBD*(s.y - point.y) - AbsB*DPrime)
    endYErr = e.yUnc*abs(bBD*(point.x - s.x) - AbsB*DPrime)
    error = pointXErr + pointYErr + startXErr + startYErr + endXErr + endYErr

    return NumericUtils.toValueUncertainty(distance, error)
def xValue(self):
    """The x coordinate and its uncertainty as a ValueUncertainty."""
    return NumericUtils.toValueUncertainty(
        value=self.x, uncertainty=self.xUnc)
def paceLengthValue(self):
    """Pace length with its uncertainty as a ValueUncertainty."""
    length, unc = self.paceLength, self.paceLengthUnc
    return NumericUtils.toValueUncertainty(length, unc)
def strideLengthValue(self):
    """Stride length with its uncertainty as a ValueUncertainty."""
    return NumericUtils.toValueUncertainty(
        value=self.strideLength, uncertainty=self.strideLengthUnc)
def gaugeValue(self):
    """Simple gauge with its uncertainty as a ValueUncertainty."""
    gauge, unc = self.simpleGauge, self.simpleGaugeUnc
    return NumericUtils.toValueUncertainty(gauge, unc)
def makeHistograms(label, columnName, errorColumnName, tracks):
    """Create per-site histogram and kernel-density traces for a track
    measurement column, upload one stacked subplot figure of each kind to
    plotly, and print their embed URLs.

    label: display name used in plot titles and remote filenames.
    columnName: DataFrame column holding the measurement values.
    errorColumnName: DataFrame column holding the value uncertainties.
    tracks: DataFrame of track data with a 'site' column.
    """
    index = 0
    histTraces = []
    densityTraces = []

    # Shared x-axis range across all sites
    xStart = tracks[columnName].min()
    xEnd = tracks[columnName].max()

    sites = tracks.site.unique()
    for site in sites:
        index += 1
        color = PlotConfigs.SITE_SPECS[site]['color']
        siteSlice = tracks[tracks.site == site]

        # Histogram trace for this site on its own y axis of the subplot
        histTraces.append(plotlyGraph.Histogram(
            name=site,
            x=siteSlice[columnName],
            autobinx=False,
            xbins=plotlyGraph.XBins(
                start=xStart,
                end=xEnd,
                size=0.01),
            xaxis='x1',
            yaxis='y%s' % int(index),
            marker=plotlyGraph.Marker(color=color) ))

        # Kernel-density distribution built from value/uncertainty pairs
        distributionValues = []
        for i, row in siteSlice.iterrows():
            distributionValues.append(NumericUtils.toValueUncertainty(
                value=row[columnName],
                uncertainty=row[errorColumnName]))

        dd = DensityDistribution(values=distributionValues)
        xValues = dd.getAdaptiveRange(10)
        yValues = dd.createDistribution(xValues=xValues, scaled=True)
        densityTraces.append(plotlyGraph.Scatter(
            name=site,
            x=xValues,
            y=yValues,
            xaxis='x1',
            yaxis='y%s' % int(index),
            mode='lines',
            fill='tozeroy',
            marker=plotlyGraph.Marker(color=color) ))

    # Upload the stacked histogram figure
    fig = plotlyTools.make_subplots(
        rows=len(sites), cols=1,
        shared_xaxes=True,
        print_grid=False)
    fig['data'] += plotlyGraph.Data(histTraces)
    fig['layout'].update(title='%s Distributions by Tracksite' % label)

    url = plotly.plot(
        filename='A16/%s-Distributions' % label.replace(' ', '-'),
        figure_or_data=fig,
        auto_open=False)
    print('HISTOGRAM[%s]:' % label, PlotlyUtils.toEmbedUrl(url))

    # Upload the stacked kernel-density figure with a pinned x range
    fig = plotlyTools.make_subplots(
        rows=len(sites), cols=1,
        shared_xaxes=True,
        print_grid=False)
    fig['data'] += plotlyGraph.Data(densityTraces)
    fig['layout'].update(
        title='%s Distributions by Tracksite' % label,
        xaxis1=plotlyGraph.XAxis(
            autorange=False,
            range=[xStart, xEnd]))

    url = plotly.plot(
        filename='A16/%s-Kernel-Distributions' % label.replace(' ', '-'),
        figure_or_data=fig,
        auto_open=False)
    print('KERNEL-DENSITY[%s]:' % label, PlotlyUtils.toEmbedUrl(url))
def _sampleTrackway(self, trackway, windowSize):
    """ Samples the trackway with a sliding window of track entries and
        returns the resulting list of SAMPLE_DATA_NT samples. Each sample
        holds weighted-average direction angle, position, and curve
        position for its window; windows that do not deviate significantly
        (< 2 sigma) from the previous sample are skipped.
        @type trackway: * """
    window = []     # current sliding window of track entries
    samples = []    # accumulated sample results

    entries = self.trackHeadingData[trackway.uid]['entries']
    analysisTrackway = trackway.getAnalysisPair(self.analysisSession)

    for entry in entries:
        # For each track entry in the trackways data add that to the sample
        # window and update the samples result
        window.append(entry)

        if len(window) < windowSize:
            # Don't create a sample until the sub-sample list exceeds the
            # sample window size
            continue

        xTests = []         # X spatial position values
        yTests = []         # Y spatial position values
        angleTests = []     # Heading angle values
        curvePosTests = []  # Curve position values

        for item in window:
            # Calculate weighted averages for various properties of the
            # current sample window
            angle = item.headingAngle
            angleTests.append(angle.valueDegrees)

            # Create a ValueUncertainty for the curve position by using the
            # fractional positional uncertainty over the spatial length of
            # the curve
            posValue = item.track.positionValue
            posUnc = math.sqrt(posValue.xUnc**2 + posValue.yUnc**2)
            curvePos = item.track.getAnalysisPair(
                self.analysisSession).curvePosition
            curvePosUnc = abs(posUnc/analysisTrackway.curveLength)
            curvePosTests.append(
                NumericUtils.toValueUncertainty(curvePos, curvePosUnc))

            pv = item.track.positionValue
            xTests.append(pv.xValue)
            yTests.append(pv.yValue)

        directionAngleMean = NumericUtils.weightedAverage(*angleTests)
        curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)
        xValue = NumericUtils.weightedAverage(*xTests)
        yValue = NumericUtils.weightedAverage(*yTests)
        position = PositionValue2D(
            x=xValue.raw, xUnc=xValue.rawUncertainty,
            y=yValue.raw, yUnc=yValue.rawUncertainty)

        # Remove the oldest entry to make room for a new one in the next
        # iteration
        window.pop(0)

        if len(samples) > 0:
            # Compare this sample to the previous one and if it does not
            # differ significantly then continue to the next iteration
            last = samples[-1].directionAngle
            totalUnc = last.rawUncertainty + directionAngleMean.rawUncertainty
            deviation = abs(directionAngleMean.raw - last.raw)/totalUnc
            if deviation < 2.0:
                continue

        samples.append(self.SAMPLE_DATA_NT(
            directionAngle=directionAngleMean,
            position=position,
            curvePoint=(
                curvePositionMean.value, directionAngleMean.value,
                curvePositionMean.uncertainty,
                directionAngleMean.uncertainty),
            curvePosition=curvePositionMean,
            track=entry.track ))

    # Extend the sampling to cover the trackway's start and end regions
    self._extendSamplesToTrackwayStart(entries[0], samples)
    self._extendSampleToTrackwayEnd(entries[-1], samples)

    return samples
def valueDegrees(self):
    """Angle in degrees with its uncertainty as a ValueUncertainty."""
    return NumericUtils.toValueUncertainty(
        value=self.degrees, uncertainty=self.uncertaintyDegrees)
def yValue(self):
    """The y coordinate and its uncertainty as a ValueUncertainty."""
    return NumericUtils.toValueUncertainty(
        value=self.y, uncertainty=self.yUnc)
def xValue(self):
    """X position paired with its uncertainty as a ValueUncertainty."""
    x, unc = self.x, self.xUnc
    return NumericUtils.toValueUncertainty(x, unc)
def echo(self, asciiLabel=False):
    """Format this point as '(x, y)' using raw uncertainty labels."""
    labels = tuple(
        NumericUtils.toValueUncertainty(v, unc, asciiLabel=asciiLabel).rawLabel
        for v, unc in ((self.x, self.xUnc), (self.y, self.yUnc)))
    return '(%s, %s)' % labels
def yValue(self):
    """Y position paired with its uncertainty as a ValueUncertainty."""
    y, unc = self.y, self.yUnc
    return NumericUtils.toValueUncertainty(y, unc)
def widthValue(self):
    """Track width with its uncertainty as a ValueUncertainty."""
    width, unc = self.width, self.widthUncertainty
    return NumericUtils.toValueUncertainty(width, unc)
def lengthValue(self):
    """Track length with its uncertainty as a ValueUncertainty."""
    return NumericUtils.toValueUncertainty(
        value=self.length, uncertainty=self.lengthUncertainty)
def value(self):
    """Angle in radians with its uncertainty as a ValueUncertainty."""
    radians, unc = self.radians, self.uncertainty
    return NumericUtils.toValueUncertainty(radians, unc)
def _analyzeTrackSeries(self, series, trackway, sitemap):
    """Compare the map-derived (entered) rotation of each track in the
    series against the field-measured rotation relative to the stride
    line, record the angular differences for later analysis, and draw
    reference pointers and deviation disks on the current sitemap
    drawing."""
    if len(series.tracks) < 2:
        # At least two tracks are required to make the comparison
        return

    for track in series.tracks:
        # Field-measured rotation; defaults to 0.0 when no measurement
        fieldAngle = Angle(
            degrees=track.rotationMeasured \
                if track.rotationMeasured \
                else 0.0)
        dataAngle = Angle(degrees=track.rotation)
        strideLine = StrideLine(track=track, series=series)

        if track.hidden or strideLine.pairTrack.hidden:
            continue

        try:
            strideLine.vector.normalize()
        except ZeroDivisionError:
            # Coincident track pair: no stride direction can be defined
            pair = strideLine.pairTrack
            self.logger.write([
                '[ERROR]: Stride line was a zero length vector',
                'TRACK: %s (%s, %s) [%s]' % (
                    track.fingerprint,
                    NumericUtils.roundToSigFigs(track.x, 3),
                    NumericUtils.roundToSigFigs(track.z, 3),
                    track.uid),
                'PAIRING: %s (%s, %s) [%s]' % (
                    pair.fingerprint,
                    NumericUtils.roundToSigFigs(pair.x, 3),
                    NumericUtils.roundToSigFigs(pair.z, 3),
                    pair.uid) ])
            continue

        axisAngle = strideLine.angle
        if track.left:
            fieldAngle.radians += axisAngle.radians
        else:
            fieldAngle.radians = axisAngle.radians - fieldAngle.radians

        # Adjust field angle into range [-180, 180]
        fieldAngle.constrainToRevolution()
        if fieldAngle.degrees > 180.0:
            fieldAngle.degrees -= 360.0

        # Base 5-degree measurement uncertainty plus a term that grows as
        # the normalized stride vector's x component approaches +/-1
        fieldAngleUnc = Angle(degrees=5.0)
        fieldAngleUnc.radians += \
            0.03/math.sqrt(1.0 - math.pow(strideLine.vector.x, 2))
        fieldDeg = NumericUtils.toValueUncertainty(
            value=fieldAngle.degrees,
            uncertainty=fieldAngleUnc.degrees)

        # Adjust data angle into range [-180, 180]
        dataAngle.constrainToRevolution()
        if dataAngle.degrees > 180.0:
            dataAngle.degrees -= 360.0

        dataAngleUnc = Angle(degrees=track.rotationUncertainty)
        dataDeg = NumericUtils.toValueUncertainty(
            value=dataAngle.degrees,
            uncertainty=dataAngleUnc.degrees)

        angle1 = Angle(degrees=dataDeg.value)
        angle2 = Angle(degrees=fieldDeg.value)

        # fill color for the disks to be added to the map are based on
        # diffDeg; uncertainty is capped at 90 degrees
        diffDeg = NumericUtils.toValueUncertainty(
            value=angle1.differenceBetween(angle2).degrees,
            uncertainty=min(90.0, math.sqrt(
                math.pow(dataAngleUnc.degrees, 2) +
                math.pow(fieldAngleUnc.degrees, 2))) )

        self._diffs.append(diffDeg.value)
        deviation = diffDeg.value/diffDeg.uncertainty
        self.deviations[track.uid] = diffDeg

        # for now, convert +/- 180 headings to 0-360, using e and m
        # comment the next four lines toggle comments for entered and
        # measured below to revert
        e = dataDeg.value
        m = fieldDeg.value
        if e < 0.0:
            e += 360.0
        if m < 0.0:
            m += 360.0

        data = dict(
            uid=track.uid,
            fingerprint=track.fingerprint,
            entered=str(e),
            measured=str(m),
            delta=abs(diffDeg.value),
            deviation=deviation,
            relative=NumericUtils.roundToOrder(track.rotationMeasured, -2),
            axis=NumericUtils.roundToOrder(axisAngle.degrees, -2),
            axisPairing='NEXT' if strideLine.isNext else 'PREV')
        self._csv.createRow(**data)

        data['track'] = track
        self._data.append(data)

        # draw the stride line pointer for reference in green
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=axisAngle.degrees,
            stroke_width=1,
            scale=0.5,
            stroke='green')

        # indicate in blue the map-derived estimate of track rotation
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=dataDeg.value,
            stroke_width=1,
            stroke='blue')

        # add the measured (spreadsheet) estimate of rotation
        self._currentDrawing.use(
            'pointer',
            (track.x, track.z),
            scene=True,
            rotation=fieldDeg.value,
            stroke_width=1,
            stroke='red')

        # place a translucent disk of radius proportional to the
        # difference in degrees
        radius = 100.0*diffDeg.value/180.0
        self._currentDrawing.circle(
            (track.x, track.z),
            radius,
            scene=True,
            fill='red',
            stroke_width=0.5,
            stroke='red',
            fill_opacity='0.5')
def _analyzeTrackSeries(self, series, trackway, sitemap):
    """Compare the stride length computed from mapped track positions
    against the field-measured stride for every track in the series,
    store the results on the analysis tracks and in self.entries, and
    draw stride lines and markers on the sitemap."""
    for index in range(series.count):
        track = series.tracks[index]
        stride = self.getStride(track)

        # Reset analysis values; only set when a valid pair line exists
        aTrack = track.getAnalysisPair(self.analysisSession)
        aTrack.strideLength = 0.0
        aTrack.strideLengthUnc = 0.0

        if stride is None:
            # Count tracks with no measured stride
            self.noData += 1

        if series.count < 2:
            # Series of length 1 should not have a measured stride length
            if stride:
                # Check for a stride to make sure data is consistent
                # instead of assuming that is not true.
                self.logger.write([
                    '[ERROR]: Stride information on a single track series',
                    'TRACK: %s (%s)' % (track.fingerprint, track.uid) ])
            continue

        # The last track pairs backward; all others pair forward
        isLastTrack = (index == (series.count - 1))
        pairTrack = series.tracks[index + (-1 if isLastTrack else 1)]

        if isLastTrack and pairTrack.next != track.uid:
            self.logger.write([
                '[ERROR]: Invalid track ordering (last track)',
                'PREV: %s (%s)' % (pairTrack.fingerprint, pairTrack.uid),
                'TRACK: %s (%s)' % (track.fingerprint, track.uid) ])
        elif not isLastTrack and track.next != pairTrack.uid:
            self.logger.write([
                '[ERROR]: Invalid track ordering',
                'TRACK: %s (%s)' % (track.fingerprint, track.uid),
                'NEXT: %s (%s)' % (pairTrack.fingerprint, pairTrack.uid) ])

        posTrack = track.positionValue
        posPair = pairTrack.positionValue

        try:
            entered = posTrack.distanceTo(posPair)
        except Exception:
            # BUG FIX: the NEXT entry previously logged track.uid instead
            # of the paired track's uid, producing a misleading warning
            self.logger.write([
                '[WARNING]: Invalid track separation of 0.0.',
                'TRACK: %s [%s]' % (track.fingerprint, track.uid),
                'NEXT: %s [%s]' % (pairTrack.fingerprint, pairTrack.uid)])
            continue

        entry = dict(
            track=track,
            # Calculated distance from AI-based data entry
            entered=entered)

        if stride and stride > 0.0:
            highDeviation = track.hasImportFlag(
                ImportFlagsEnum.HIGH_STRIDE_UNCERTAINTY)

            # If stride measurement exists do comparison
            meas = NumericUtils.toValueUncertainty(
                value=float(stride),
                uncertainty=self.HIGH_MEASURED_UNCERTAINTY
                    if highDeviation
                    else self.MEASURED_UNCERTAINTY)

            delta = entered.value - meas.value
            # Sigma deviation with uncertainties combined in quadrature
            deviation = abs(delta/math.sqrt(
                meas.uncertainty**2 + entered.uncertainty**2))

            if highDeviation and deviation > 3:
                styles = ('orange', 10)
            else:
                # Measured distance from the catalog
                entry['measured'] = meas
                # Absolute difference between calculated and measured distance
                entry['delta'] = delta
                # Sigma trackDeviations between
                entry['deviation'] = deviation
                # Fractional error between calculated and measured distance
                entry['fractional'] = delta/meas.value

                styles = ('red', 10) \
                    if entry['deviation'] > 2.0 \
                    else ('green', 5)
        else:
            styles = ('purple', 5)

        drawing = sitemap.cache.get('drawing')

        if not isLastTrack:
            aTrack.strideLength = entered.raw
            aTrack.strideLengthUnc = entered.rawUncertainty

        drawing.line(
            posTrack.toMayaTuple(), posPair.toMayaTuple(),
            stroke=styles[0], stroke_width=1, stroke_opacity='0.1')
        drawing.circle(
            posTrack.toMayaTuple(), styles[1],
            stroke='none', fill=styles[0], fill_opacity=0.5)

        self.entries.append(entry)
        track.cache.set('strideData', entry)
def _analyzeSeriesPair(self, sitemap, series, pairSeries):
    """Compute pace lengths between each track in *series* and its paired
    track in *pairSeries*, compare against field measurements where
    present, store the results on the analysis tracks and in
    self.entries, and queue a draw callback for each valid pace line."""
    for index in ListUtils.range(series.count):
        track = series.tracks[index]
        data = track.snapshotData

        # Reset the analysis pace values; they are only populated when a
        # valid pace line is found below
        aTrack = track.getAnalysisPair(self.analysisSession)
        aTrack.paceLength = 0.0
        aTrack.paceLengthUnc = 0.0

        if self.hasPace(track):
            # Field-measured pace with the standard measurement uncertainty
            measured = NumericUtils.toValueUncertainty(
                value=data.get(SnapshotDataEnum.PACE),
                uncertainty=self.MEASURED_UNCERTAINTY)
        else:
            self.noData += 1
            measured = None

        pairTrack = self._getPairedTrack(track, series, pairSeries)
        if pairTrack is None:
            # Only log when a field measurement existed but no pair track
            # could be resolved; otherwise skip silently
            if measured is None:
                continue
            self._logUnresolvableTrack(
                track=track,
                sitemap=sitemap,
                message='Unable to determine pairSeries track')
            continue

        position = track.positionValue
        pairPosition = pairTrack.positionValue
        paceLine = LineSegment2D(position, pairPosition)

        if not paceLine.isValid:
            self._logUnresolvableTrack(
                track=track,
                sitemap=sitemap,
                message='Invalid track separation of 0.0. Ignoring track')
            continue

        entered = paceLine.length

        entry = dict(
            track=track,
            pairTrack=pairTrack,
            drawFunc=functools.partial(
                self._drawPaceLine, sitemap, paceLine),
            # Calculated distance from AI-based data entry
            entered=entered)

        if measured:
            # Measured distance from the catalog
            entry['measured'] = measured

            # Absolute difference between calculated and measured distance
            delta = entered.raw - measured.raw
            entry['delta'] = delta

            # Fractional error between calculated and measured distance
            entry['fractional'] = delta/measured.raw

            # Sigma trackDeviations between
            entry['deviation'] = abs(delta/math.sqrt(
                measured.rawUncertainty**2 + entered.rawUncertainty**2))

            self.count += 1

        aTrack = track.getAnalysisPair(self.analysisSession)
        aTrack.paceLength = entered.raw
        aTrack.paceLengthUnc = entered.rawUncertainty

        self.entries.append(entry)
        # self._drawPaceLine(sitemap, paceLine, )
        track.cache.set('paceData', entry)