Example #1
    def _postAnalyze(self):
        """ Write the logs. """

        self._paths = []

        self.logger.write(
            '%s\nFRACTIONAL ERROR (Measured vs Entered)' % ('='*80))
        self._process('Error', 'wDev', 'lDev', self.trackDeviations)

        self.logger.write('%s\nFRACTIONAL UNCERTAINTY ERROR' % ('='*80))
        self._process(
            'Uncertainty Error', 'wDelta', 'lDelta', None, absoluteOnly=True)

        csv = CsvWriter(
            path=self.getPath('Length-Width-Deviations.csv'),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint'),
                ('wDelta', 'Width Deviation'),
                ('lDelta', 'Length Deviation') ])

        for entry in self.entries:
            track = entry['track']
            csv.createRow(
                uid=track.uid,
                fingerprint=track.fingerprint,
                wDelta=entry.get('wDelta', -1.0),
                lDelta=entry.get('lDelta', -1.0) )
        csv.save()

        self._processAspectRatios()

        self.mergePdfs(self._paths)
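Taken together, these snippets show two equivalent ways to set up a CsvWriter: pass path, autoIndexFieldName, and fields to the constructor (as above), or assign the attributes and call addFields afterwards (as in later examples). Rows are then added with createRow(**values) or addRow(dict) and written with save(). A minimal sketch of that pattern, assuming CsvWriter is importable from the project's own utilities (the import path below is an assumption; none of these snippets show it):

# Minimal CsvWriter sketch based on the usage shown in these examples.
# The import path is an assumption; the snippets never show it.
from cadence.analysis.shared.CsvWriter import CsvWriter

csv = CsvWriter(
    path='Example-Report.csv',
    autoIndexFieldName='Index',
    fields=[
        ('uid', 'UID'),
        ('fingerprint', 'Fingerprint')])

csv.createRow(uid='track-0001', fingerprint='EXAMPLE-FINGERPRINT')
csv.save()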
Example #2
    def _preAnalyze(self):
        """_preDeviations doc..."""
        self.noData = 0
        self.entries = []
        self._paths = []

        self.initializeFolder(self.MAPS_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath('Pace-Length-Deviations.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('entered', 'Field'),
            ('measured', 'Measured'),
            ('dev', 'Deviation'),
            ('delta', 'Fractional Error'),
            ('pairedFingerprint', 'Track Pair Fingerprint'),
            ('pairedUid', 'Track Pair UID') )
        self._csv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Pace-Match-Errors.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('measured', 'Measured') )
        self._errorCsv = csv
Example #3
    def _preAnalyze(self):
        self._tracks = []

        csv = CsvWriter()
        csv.path = self.getPath('Origin-Located.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint') )
        self._csv = csv
Example #4
    def _preAnalyze(self):
        self._uncs = []
        self._tracks = []

        self.initializeFolder(self.DRAWING_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath("Large-Rotational-Uncertainties.csv")
        csv.autoIndexFieldName = "Index"
        csv.addFields(("uid", "UID"), ("fingerprint", "Fingerprint"), ("rotation", "Rotation"))
        self._largeUncCsv = csv
Example #5
    def _analyzeTrackway(self, trackway, sitemap):
        """

        @param trackway:
        @param sitemap:
        @return:
        """

        self.entries = dict(lp=[], rp=[], lm=[], rm=[])

        super(SimulationCsvExporterStage, self)._analyzeTrackway(
            trackway=trackway,
            sitemap=sitemap
        )

        csv = CsvWriter(
            autoIndexFieldName='Index',
            fields=[
                'lp_name', 'lp_uid', 'lp_x', 'lp_dx', 'lp_y', 'lp_dy',
                'rp_name', 'rp_uid', 'rp_x', 'rp_dx', 'rp_y', 'rp_dy',
                'lm_name', 'lm_uid', 'lm_x', 'lm_dx', 'lm_y', 'lm_dy',
                'rm_name', 'rm_uid', 'rm_x', 'rm_dx', 'rm_y', 'rm_dy'
            ]
        )

        length = max(
            len(self.entries['lp']),
            len(self.entries['rp']),
            len(self.entries['lm']),
            len(self.entries['rm']),
        )

        for index in range(length):
            items = []
            for limb_id, entries in self.entries.items():
                if index < len(entries):
                    items += entries[index].items()
                else:
                    items += self._create_entry(limb_id).items()
            csv.addRow(dict(items))

        path = self.owner.settings.fetch('EXPORT_DATA_PATH')
        if path is None:
            path = self.owner.getLocalPath('Simulation', 'data', isFile=True)
        path = FileUtils.makeFilePath(path, trackway.name, 'source.csv')

        directory = os.path.dirname(path)
        if not os.path.exists(directory):
            os.makedirs(directory)

        if csv.save(path):
            print('[SAVED]:', path)
        else:
            print('[ERROR]: Unable to save CSV at "{}"'.format(path))
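The _create_entry helper used above is not part of this excerpt; judging from how its result is merged into the row, it presumably returns an empty placeholder fragment for the given limb prefix. A rough sketch of that assumption (not the project's actual implementation):

    def _create_entry(self, limb_id):
        # Hypothetical placeholder: one empty value per column used above so
        # that rows stay aligned when one limb has fewer entries than others.
        return {
            '{}_{}'.format(limb_id, suffix): ''
            for suffix in ('name', 'uid', 'x', 'dx', 'y', 'dy')
        }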
Example #6
    def _preAnalyze(self):
        self._tracks = []

        csv = CsvWriter()
        csv.path = self.getPath('Track-Priority.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('priority', 'Priority'),
            ('preserved', 'Preserved'),
            ('cast', 'Cast'),
            ('outlined', 'Outlined') )
        self._csv = csv
Example #7
    def _postAnalyze(self):
        self.logger.write('%s gauge calculated tracks' % self._count)

        self._trackwayCsv.save()

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Errors.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._errorTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._errorTracks:
            self.logger.write('Failed to calculate gauge for %s tracks' % len(self._errorTracks))

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Ignores.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._ignoreTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._ignoreTracks:
            self.logger.write('%s tracks lacked suitability for gauge calculation' % len(
                self._ignoreTracks))

        plotData = [
            ('stride', 'red', 'AU', 'Stride-Normalized Weighted'),
            ('pace', 'green', 'AU', 'Pace-Normalized Weighted'),
            ('width', 'blue', 'AU', 'Width-Normalized Weighted'),
            ('abs', 'purple', 'm', 'Absolute Unweighted') ]

        for data in plotData:
            out = []
            source = ListUtils.sortListByIndex(
                source=getattr(self._trackwayGauges, StringUtils.toStr2(data[0])),
                index=0,
                inPlace=True)

            for item in source:
                out.append(PositionValue2D(x=len(out), y=item[1].value, yUnc=item[1].uncertainty))
            self._plotTrackwayGauges(out, *data[1:])

        self.mergePdfs(self._paths, 'Gauges.pdf')
Example #8
    def _preAnalyze(self):
        self._uncs   = []
        self._tracks = []

        self.initializeFolder(self.DRAWING_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath('Large-Spatial-Uncertainties.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('x', 'X'),
            ('z', 'Z') )
        self._largeUncCsv = csv
Example #9
    def _preAnalyze(self):
        """_preDeviations doc..."""
        self.noData = 0
        self.entries = []
        self._paths = []

        csv = CsvWriter()
        csv.path = self.getPath('Stride-Length-Deviations.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('entered', 'Entered'),
            ('measured', 'Measured'),
            ('dev', 'Deviation'),
            ('delta', 'Fractional Error'))
        self._csv = csv
Example #10
    def _preAnalyze(self):
        """_preAnalyze doc..."""
        self.cache.set('trackDeviations', {})
        self._diffs = []
        self._data  = []
        self._currentDrawing = None

        csv = CsvWriter()
        csv.path = self.getPath('Rotation-Report.csv', isFile=True)
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('delta', 'Discrepancy'),
            ('entered', 'Entered'),
            ('measured', 'Measured'),
            ('deviation', 'Deviation'),
            ('relative', 'Relative'),
            ('axis', 'Axis'),
            ('axisPairing', 'Axis Pairing'))
        self._csv = csv
Example #11
    def _preAnalyze(self):
        self._trackways = []
        self._densityPlots = dict()

        fields = [
            ('name', 'Name'),
            ('length', 'Length'),
            ('gauge', 'Gauge'),
            ('gaugeUnc', 'Gauge Uncertainty'),
            ('widthNormGauge', 'Width Normalized Gauge'),
            ('widthNormGaugeUnc', 'Width Normalized Gauge Uncertainty'),
            ('strideLength', 'Stride Length'),
            ('strideLengthUnc', 'Stride Length Uncertainty'),
            ('paceLength', 'Pace Length'),
            ('paceLengthUnc', 'Pace Length Uncertainty'),
            ('density', 'Density'),
            ('densityNorm', 'Normalized Density'),
            ('densityNormUnc', 'Normalized Density Uncertainty'),
            ('pesWidth', 'Pes Width'),
            ('pesWidthUnc', 'Pes Width Uncertainty'),
            ('pesLength', 'Pes Length'),
            ('pesLengthUnc', 'Pes Length Uncertainty'),
            ('manusWidth', 'Manus Width'),
            ('manusWidthUnc', 'Manus Width Uncertainty'),
            ('manusLength', 'Manus Length'),
            ('manusLengthUnc', 'Manus Length Uncertainty') ]

        csv = CsvWriter()
        csv.path = self.getPath(self.TRACKWAY_STATS_CSV)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(*fields)
        self._weightedStats = csv

        csv = CsvWriter()
        csv.path = self.getPath(self.UNWEIGHTED_TRACKWAY_STATS_CSV)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(*fields)
        self._unweightedStats = csv
Example #12
    def _preAnalyze(self):
        self._trackwayGauges = self._GAUGE_DATA_NT([], [], [], [])
        self._paths = []
        self._errorTracks = []
        self._ignoreTracks = []
        self._count = 0

        self._trackwayCsv = CsvWriter(
            path=self.getPath('Trackway-Gauge-Averages.csv'),
            autoIndexFieldName='Index',
            fields=[
                ('name', 'Name'),
                ('count', 'Pes Count'),
                ('abs', 'Absolute'),
                ('absUnc', 'Abs Unc'),
                ('stride', 'Stride Norm'),
                ('strideUnc', 'Stride Unc'),
                ('pace', 'Pace Norm'),
                ('paceUnc', 'Pace Unc'),
                ('width', 'Width Norm'),
                ('widthUnc', 'Width Unc')])
Example #13
    def _preAnalyze(self):
        csv = CsvWriter()
        csv.path = self.getPath('Solo-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint') )
        self._soloTrackCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Corrupt-Track-Report.csv')
        csv.removeIfSavedEmpty = True
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('i', 'Database Index'),
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('reason', 'Reason'))
        self._badTrackCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Unprocessed-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('previous', 'Previous Track UID'),
            ('next', 'Next Track UID') )
        self._unprocessedCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Unknown-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('hidden', 'Hidden'),
            ('complete', 'Complete') )
        self._unknownCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Ignored-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('sitemap', 'Sitemap Name'),
            ('fingerprint', 'Fingerprint'),
            ('hidden', 'Hidden'),
            ('orphan', 'Orphaned') )
        self._orphanCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Sitemap-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('name', 'Sitemap Name'),
            ('unprocessed', 'Unprocessed'),
            ('ignores', 'Ignored'),
            ('count', 'Count'),
            ('incomplete', 'Incomplete'),
            ('completion', 'Completion (%)') )
        self._sitemapCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Trackway-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('name', 'Name'),
            ('leftPes', 'Left Pes'),
            ('rightPes', 'Right Pes'),
            ('leftManus', 'Left Manus'),
            ('rightManus', 'Right Manus'),
            ('incomplete', 'Incomplete'),
            ('total', 'Total'),
            ('ready', 'Analysis Ready'),
            ('complete', 'Completion (%)') )
        self._trackwayCsv = csv

        self._allTracks = dict()

        #-------------------------------------------------------------------------------------------
        # CREATE ALL TRACK LISTING
        #       This list is used to find tracks that are not referenced by relationships to
        #       sitemaps, which would never be loaded by standard analysis methods
        model = Tracks_Track.MASTER
        session = model.createSession()
        tracks = session.query(model).all()
        for t in tracks:
            self._checkTrackProperties(t, tracks)
            self._allTracks[t.uid] = dict(
                uid=t.uid,
                fingerprint=t.fingerprint,
                hidden=t.hidden,
                complete=t.isComplete)
        session.close()
Example #14
    def _addQuartileEntry(self, label, trackway, data):
        if not data or len(data) < 4:
            return

        if label not in self._quartileStats:
            csv = CsvWriter()
            csv.path = self.getPath(
                '%s-Quartiles.csv' % label.replace(' ', '-'),
                isFile=True)
            csv.autoIndexFieldName = 'Index'
            csv.addFields(
                ('name', 'Name'),

                ('normality', 'Normality'),
                ('unweightedNormality', 'Unweighted Normality'),

                ('unweightedLowerBound', 'Unweighted Lower Bound'),
                ('unweightedLowerQuart', 'Unweighted Lower Quartile'),
                ('unweightedMedian',     'Unweighted Median'),
                ('unweightedUpperQuart', 'Unweighted Upper Quartile'),
                ('unweightedUpperBound', 'Unweighted Upper Bound'),

                ('lowerBound', 'Lower Bound'),
                ('lowerQuart', 'Lower Quartile'),
                ('median',     'Median'),
                ('upperQuart', 'Upper Quartile'),
                ('upperBound', 'Upper Bound'),

                ('diffLowerBound', 'Diff Lower Bound'),
                ('diffLowerQuart', 'Diff Lower Quartile'),
                ('diffMedian',     'Diff Median'),
                ('diffUpperQuart', 'Diff Upper Quartile'),
                ('diffUpperBound', 'Diff Upper Bound') )
            self._quartileStats[label] = csv

        csv = self._quartileStats[label]
        dd = mstats.density.Distribution(data)
        unweighted = mstats.density.boundaries.unweighted_two(dd)
        weighted = mstats.density.boundaries.weighted_two(dd)

        #-----------------------------------------------------------------------
        # PLOT DENSITY
        #   Create a density plot for each value
        p = MultiScatterPlot(
            title='%s %s Density Distribution' % (trackway.name, label),
            xLabel=label,
            yLabel='Probability (AU)')

        x_values = mstats.density.ops.adaptive_range(dd, 10.0)
        y_values = dd.probabilities_at(x_values=x_values)

        p.addPlotSeries(
            line=True,
            markers=False,
            label='Weighted',
            color='blue',
            data=zip(x_values, y_values)
        )

        # Unweighted distribution built from the raw measurement values,
        # sampled over its own adaptive range for the unweighted plot series.
        temp = mstats.density.create_distribution(
            dd.naked_measurement_values(raw=True)
        )
        x_values = mstats.density.ops.adaptive_range(temp, 10.0)
        y_values = temp.probabilities_at(x_values=x_values)

        p.addPlotSeries(
            line=True,
            markers=False,
            label='Unweighted',
            color='red',
            data=zip(x_values, y_values)
        )

        if label not in self._densityPlots:
            self._densityPlots[label] = []
        self._densityPlots[label].append(
            p.save(self.getTempFilePath(extension='pdf')))

        #-----------------------------------------------------------------------
        # NORMALITY
        #       Calculate the normality of the weighted and unweighted
        #       distributions as a test against how well they conform to
        #       the Normal distribution calculated from the unweighted data.
        #
        #       The unweighted Normality test uses a basic bandwidth detection
        #       algorithm to create a uniform Gaussian kernel to populate the
        #       DensityDistribution. It is effectively a density kernel
        #       estimation, but is aggressive in selecting the bandwidth to
        #       prevent over-smoothing multi-modal distributions.
        if len(data) < 8:
            normality = -1.0
            unweightedNormality = -1.0
        else:
            result = NumericUtils.getMeanAndDeviation(data)
            mean = result.raw
            std = result.rawUncertainty
            normality = mstats.density.ops.overlap(
                dd,
                mstats.density.create_distribution([mean], [std])
            )

            rawValues = []
            for value in data:
                rawValues.append(value.value)
            ddRaw = mstats.density.create_distribution(rawValues)
            unweightedNormality = mstats.density.ops.overlap(
                ddRaw,
                mstats.density.create_distribution([mean], [std])
            )

        # Prevent divide by zero
        unweighted = [
            0.00001 if NumericUtils.equivalent(x, 0) else x
            for x in unweighted
        ]

        csv.addRow({
            'index':trackway.index,
            'name':trackway.name,

            'normality':normality,
            'unweightedNormality':unweightedNormality,

            'unweightedLowerBound':unweighted[0],
            'unweightedLowerQuart':unweighted[1],
            'unweightedMedian'    :unweighted[2],
            'unweightedUpperQuart':unweighted[3],
            'unweightedUpperBound':unweighted[4],

            'lowerBound':weighted[0],
            'lowerQuart':weighted[1],
            'median'    :weighted[2],
            'upperQuart':weighted[3],
            'upperBound':weighted[4],

            'diffLowerBound':abs(unweighted[0] - weighted[0])/unweighted[0],
            'diffLowerQuart':abs(unweighted[1] - weighted[1])/unweighted[1],
            'diffMedian'    :abs(unweighted[2] - weighted[2])/unweighted[2],
            'diffUpperQuart':abs(unweighted[3] - weighted[3])/unweighted[3],
            'diffUpperBound':abs(unweighted[4] - weighted[4])/unweighted[4]
        })
Example #15
    def _process(
            self, label, widthKey, lengthKey, trackDeviations,
            absoluteOnly=False
    ):
        """_process doc..."""
        pl  = self.plot
        ws  = []
        ls  = []
        w2D = []
        l2D = []

        for entry in self.entries:
            if widthKey in entry:
                ws.append(entry[widthKey])
                if lengthKey in entry:
                    w2D.append(entry[widthKey])

            if lengthKey in entry:
                ls.append(entry[lengthKey])
                if widthKey in entry:
                    l2D.append(entry[lengthKey])

        plotList = [
            ('widths', ws, 'Width', 'b'),
            ('lengths', ls, 'Length', 'r')]

        wRes = NumericUtils.getMeanAndDeviation(ws)
        self.logger.write('Width %ss' % wRes.label)
        lRes = NumericUtils.getMeanAndDeviation(ls)
        self.logger.write('Length %ss' % lRes.label)

        for data in plotList:
            if not absoluteOnly:
                d = data[1]
                self._paths.append(
                    self._makePlot(
                        label, d, data,
                        histRange=(-1.0, 1.0)))
                self._paths.append(
                    self._makePlot(
                        label, d, data,
                        isLog=True,
                        histRange=(-1.0, 1.0)))

            # noinspection PyUnresolvedReferences
            d = np.absolute(np.array(data[1]))
            self._paths.append(
                self._makePlot(
                    'Absolute ' + label, d, data,
                    histRange=(0.0, 1.0)))
            self._paths.append(
                self._makePlot(
                    'Absolute ' + label, d, data,
                    isLog=True,
                    histRange=(0.0, 1.0)))

        self.owner.createFigure('twoD')
        pl.hist2d(w2D, l2D, bins=20, range=([-1, 1], [-1, 1]))
        pl.title('2D %s Distribution' % label)
        pl.xlabel('Width %s' % label)
        pl.ylabel('Length %s' % label)
        pl.xlim(-1.0, 1.0)
        pl.ylim(-1.0, 1.0)
        path = self.getTempPath(
            '%s.pdf' % StringUtils.getRandomString(16),
            isFile=True)
        self.owner.saveFigure('twoD', path)
        self._paths.append(path)

        csv = CsvWriter()
        csv.path = self.getPath(
            '%s-Deviations.csv' % label.replace(' ', '-'),
            isFile=True)
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('wSigma', 'Width Deviation'),
            ('lSigma', 'Length Deviation') )

        count = 0
        for entry in self.entries:
            widthDevSigma  = NumericUtils.roundToOrder(
                abs(entry.get(widthKey, 0.0)/wRes.uncertainty), -2)
            lengthDevSigma = NumericUtils.roundToOrder(
                abs(entry.get(lengthKey, 0.0)/lRes.uncertainty), -1)
            if widthDevSigma > 2.0 or lengthDevSigma > 2.0:
                count += 1
                track = entry['track']
                data = dict(
                    wSigma=widthDevSigma,
                    lSigma=lengthDevSigma)

                if trackDeviations is not None:
                    trackDeviations[track.uid] = data

                csv.createRow(
                    uid=track.uid,
                    fingerprint=track.fingerprint,
                    **data)

        if not csv.save():
            self.logger.write(
                '[ERROR]: Failed to save CSV file to %s' % csv.path)

        percentage = NumericUtils.roundToOrder(
            100.0*float(count)/float(len(self.entries)), -2)
        self.logger.write('%s significant %ss (%s%%)' % (
            count, label.lower(), percentage))
        if percentage > (100.0 - 95.45):
            self.logger.write(
                '[WARNING]: Large deviation count exceeds ' +
                'normal distribution expectations.')
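The 95.45 constant in that final check is the share of a normal distribution lying within two standard deviations of the mean, so the warning fires when more than roughly 4.55% of entries deviate by over 2 sigma:

# Two-sided normal tail beyond 2 sigma: about 100% - 95.45% = 4.55% of
# entries are expected past the threshold under a normal distribution.
expected_tail_percent = 100.0 - 95.45  # ~4.55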
Example #16
def write_to_file(trackway, tracks_data):
    """
    @param trackway:
    @param tracks_data:
    @return:
    """

    csv = CsvWriter(
        path="{}.csv".format(trackway.name),
        autoIndexFieldName="Index",
        fields=[
            "lp_name",
            "lp_uid",
            "lp_x",
            "lp_dx",
            "lp_y",
            "lp_dy",
            "lp_w",
            "lp_dw",
            "lp_l",
            "lp_dl",
            "rp_name",
            "rp_uid",
            "rp_x",
            "rp_dx",
            "rp_y",
            "rp_dy",
            "rp_w",
            "rp_dw",
            "rp_l",
            "rp_dl",
            "lm_name",
            "lm_uid",
            "lm_x",
            "lm_dx",
            "lm_y",
            "lm_dy",
            "lm_w",
            "lm_dw",
            "lm_l",
            "lm_dl",
            "rm_name",
            "rm_uid",
            "rm_x",
            "rm_dx",
            "rm_y",
            "rm_dy",
            "rm_w",
            "rm_dw",
            "rm_l",
            "rm_dl",
        ],
    )

    count = max([len(ts) if ts else 0 for ts in tracks_data.values()])

    for i in range(count):
        entry = {}

        for key in ["lp", "rp", "lm", "rm"]:
            data = tracks_data[key][i] if i < len(tracks_data[key]) else None
            track = data["track"] if data else None

            entry.update(
                {
                    "{}_name".format(key): track.fingerprint if track else "",
                    "{}_uid".format(key): track.uid if track else "",
                }
            )

            point = track.positionValue if track else None
            entry.update(
                {
                    "{}_x".format(key): point.x if point else "",
                    "{}_dx".format(key): point.xUnc if point else "",
                    "{}_y".format(key): point.y if point else "",
                    "{}_dy".format(key): point.yUnc if point else "",
                }
            )

            length = track.lengthValue if track else None
            entry.update(
                {
                    "{}_l".format(key): length.value if length else "",
                    "{}_dl".format(key): length.uncertainty if length else "",
                }
            )

            width = track.widthValue if track else None
            entry.update(
                {
                    "{}_w".format(key): width.value if width else "",
                    "{}_dw".format(key): width.uncertainty if width else "",
                }
            )

        csv.createRow(**entry)

    csv.save()
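For context, a hypothetical call might look like the following; the stand-in namedtuples only mimic the attributes write_to_file reads, while the real track and value classes come from the project and are not shown here:

from collections import namedtuple

# Hypothetical stand-ins exposing only the attributes write_to_file uses.
Value = namedtuple('Value', ['value', 'uncertainty'])
Point = namedtuple('Point', ['x', 'xUnc', 'y', 'yUnc'])
Track = namedtuple(
    'Track',
    ['uid', 'fingerprint', 'positionValue', 'widthValue', 'lengthValue'])
Trackway = namedtuple('Trackway', ['name'])

track = Track(
    uid='track-0001',
    fingerprint='EXAMPLE-FINGERPRINT',
    positionValue=Point(x=1.25, xUnc=0.05, y=0.4, yUnc=0.05),
    widthValue=Value(0.31, 0.02),
    lengthValue=Value(0.36, 0.02))

write_to_file(
    trackway=Trackway(name='EXAMPLE-TRACKWAY'),
    tracks_data={'lp': [{'track': track}], 'rp': [], 'lm': [], 'rm': []})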
Example #17
class SimpleGaugeStage(CurveOrderedAnalysisStage):
    """A class for..."""

#===============================================================================
#                                                                                       C L A S S

    _GAUGE_DATA_NT = namedtuple('GAUGE_DATA_NT', ['abs', 'width', 'pace', 'stride'])

#_______________________________________________________________________________
    def __init__(self, key, owner, **kwargs):
        """Creates a new instance of SimpleGaugeStage."""
        super(SimpleGaugeStage, self).__init__(
            key, owner,
            label='Simple Track Gauge',
            **kwargs)
        self._paths  = []
        self._errorTracks = []
        self._ignoreTracks = []
        self._trackwayGauges = None
        self._count = 0
        self._trackwayCsv = None

#===============================================================================
#                                                                               P R O T E C T E D

#_______________________________________________________________________________
    def _preAnalyze(self):
        self._trackwayGauges = self._GAUGE_DATA_NT([], [], [], [])
        self._paths = []
        self._errorTracks = []
        self._ignoreTracks = []
        self._count = 0

        self._trackwayCsv = CsvWriter(
            path=self.getPath('Trackway-Gauge-Averages.csv'),
            autoIndexFieldName='Index',
            fields=[
                ('name', 'Name'),
                ('count', 'Pes Count'),
                ('abs', 'Absolute'),
                ('absUnc', 'Abs Unc'),
                ('stride', 'Stride Norm'),
                ('strideUnc', 'Stride Unc'),
                ('pace', 'Pace Norm'),
                ('paceUnc', 'Pace Unc'),
                ('width', 'Width Norm'),
                ('widthUnc', 'Width Unc')])

#_______________________________________________________________________________
    def _analyzeSitemap(self, sitemap):
        self._createDrawing(sitemap, 'SIMPLE-GAUGE', 'Simple-Gauge')
        super(SimpleGaugeStage, self)._analyzeSitemap(sitemap)
        self._saveDrawing(sitemap)

#_______________________________________________________________________________
    def _analyzeTrackway(self, trackway, sitemap):
        bundle = self.owner.getSeriesBundle(trackway)
        if not bundle.isReady:
            # Skip trackways that have incomplete series
            return

        data = self._collectGaugeData(bundle, trackway, sitemap)
        if data['gauges'].abs:
            self._processGaugeData(bundle, trackway, data)

#_______________________________________________________________________________
    def _collectGaugeData(self, bundle, trackway, sitemap):
        """ Collects the trackway gauge data by generating projections for each series and then
            iterating over every track in the trackway and extracting the gauge information from
            the CurveSeries projection data.

            @param bundle: TrackSeriesBundle
            @param trackway: Tracks_Trackway
            @param sitemap: Tracks_Sitemap
            @return: dict """

        trackway.cache.set('data', {'points':[], 'gauges':self._GAUGE_DATA_NT([], [], [], [])})

        for key, series in bundle.items():
            series.cache.set('referenceWidth', series.averageTrackWidth)
            if series.count < 2:
                continue

            curve = CurveSeries(stage=self, series=series)
            try:
                curve.compute()
            except Exception as err:
                self.logger.writeError([
                    '[ERROR]: Failed to compute track curve projection',
                    'TRACKWAY: %s' % trackway.name,
                    'SERIES: %s[%s]' % (series.fingerprint, series.count) ], err)
                raise

            curve.draw(sitemap.cache.get('drawing'), drawPairs=False)
            series.cache.set('curve', curve)

        super(SimpleGaugeStage, self)._analyzeTrackway(trackway=trackway, sitemap=sitemap)

        for key, series in bundle.items():
            series.cache.remove('referenceWidth')
        return trackway.cache.extract('data')

#_______________________________________________________________________________
    def _processGaugeData(self, bundle, trackway, data):
        pesCount = bundle.leftPes.count + bundle.rightPes.count
        record = {'name':trackway.name, 'count':pesCount}

        gaugeData = data['gauges']

        try:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.abs)
            record['abs'] = value.value
            record['absUnc'] = value.uncertainty
            self._trackwayGauges.abs.append((pesCount, value))
        except ZeroDivisionError:
            return

        widthValue = NumericUtils.getWeightedMeanAndDeviation(gaugeData.width)
        record['width'] = widthValue.value
        record['widthUnc'] = widthValue.uncertainty
        self._trackwayGauges.width.append((pesCount, widthValue))

        if gaugeData.pace:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.pace)
            record['pace'] = value.value
            record['paceUnc'] = value.uncertainty
            self._trackwayGauges.pace.append((pesCount, value))
        else:
            record['pace'] = ''
            record['paceUnc'] = ''

        if gaugeData.stride:
            value = NumericUtils.getWeightedMeanAndDeviation(gaugeData.stride)
            record['stride'] = value.value
            record['strideUnc'] = value.uncertainty
            self._trackwayGauges.stride.append((pesCount, value))
        else:
            record['stride'] = ''
            record['strideUnc'] = ''

        self._trackwayCsv.addRow(record)

        plot = ScatterPlot(
            data=data['points'],
            title='%s Width-Normalized Gauges (%s)' % (trackway.name, widthValue.label),
            xLabel='Track Position (m)',
            yLabel='Gauge (AU)')
        self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))

        analysisTrackway = trackway.getAnalysisPair(self.analysisSession)
        analysisTrackway.simpleGauge = widthValue.raw
        analysisTrackway.simpleGaugeUnc = widthValue.rawUncertainty

#_______________________________________________________________________________
    def _analyzeTrack(self, track, series, trackway, sitemap):
        if series.count < 1 or not track.pes:
            return

        segmentPair = None
        segmentSeries = None
        skipped = 0
        for key, otherSeries in series.bundle.items():
            if not otherSeries.pes:
                continue

            if otherSeries == series or otherSeries.left == series.left or otherSeries.count < 2:
                # If the series isn't suitable for comparison then mark this as a skipped attempt
                # and continue.
                skipped += 1
                continue

            segment = otherSeries.cache.get('curve').getTrackSegment(track)
            if segment is None:
                continue

            for pair in segment.pairs:
                if pair['track'] != track:
                    continue
                if segmentPair is None or pair['line'].length.raw < segmentPair['line'].length.raw:
                    # Store the shortest of the available gauge lengths
                    segmentPair = pair
                    segmentSeries = otherSeries
                break

        if skipped == 4:
            # If skipped is 4 it means that no suitable series existed for calculating a gauge
            # value and the method should abort quietly
            self._ignoreTracks.append(track)
            return

        if segmentPair is None:
            self._errorTracks.append(track)
            return

        color = 'blue' if segmentSeries.pes == series.pes else 'orange'

        data = trackway.cache.get('data')
        gauges = data['gauges']

        line = segmentPair['line']
        sitemap.cache.get('drawing').lineSegment(
            line, stroke=color, stroke_width=1, stroke_opacity='0.5')

        length = line.length
        gauges.abs.append(length)

        analysisTrack = track.getAnalysisPair(self.analysisSession)
        analysisTrack.simpleGauge = line.length.raw
        analysisTrack.simpleGaugeUnc = line.length.rawUncertainty

        widthNormGauge = NumericUtils.divideValueUncertainties(
            numeratorValue=length,
            denominatorValue=series.cache.get('referenceWidth'))
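        # NumericUtils.divideValueUncertainties presumably applies standard
        # error propagation for a quotient q = a/b, combining the relative
        # uncertainties as dq/q = sqrt((da/a)**2 + (db/b)**2); treat this as
        # an assumption about that helper rather than verified behavior.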
        gauges.width.append(widthNormGauge)

        point = PositionValue2D(
            x=analysisTrack.curvePosition, xUnc=0.0,
            y=widthNormGauge.value, yUnc=widthNormGauge.uncertainty)
        data['points'].append(point)

        if analysisTrack.paceLength:
            gauges.pace.append(NumericUtils.divideValueUncertainties(
                length, analysisTrack.paceLengthValue))

        if analysisTrack.strideLength:
            gauges.stride.append(NumericUtils.divideValueUncertainties(
                length, analysisTrack.strideLengthValue))

        self._count += 1

#_______________________________________________________________________________
    def _postAnalyze(self):
        self.logger.write('%s gauge calculated tracks' % self._count)

        self._trackwayCsv.save()

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Errors.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._errorTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._errorTracks:
            self.logger.write('Failed to calculate gauge for %s tracks' % len(self._errorTracks))

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Ignores.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._ignoreTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._ignoreTracks:
            self.logger.write('%s tracks lacked suitability for gauge calculation' % len(
                self._ignoreTracks))

        plotData = [
            ('stride', 'red', 'AU', 'Stride-Normalized Weighted'),
            ('pace', 'green', 'AU', 'Pace-Normalized Weighted'),
            ('width', 'blue', 'AU', 'Width-Normalized Weighted'),
            ('abs', 'purple', 'm', 'Absolute Unweighted') ]

        for data in plotData:
            out = []
            source = ListUtils.sortListByIndex(
                source=getattr(self._trackwayGauges, StringUtils.toStr2(data[0])),
                index=0,
                inPlace=True)

            for item in source:
                out.append(PositionValue2D(x=len(out), y=item[1].value, yUnc=item[1].uncertainty))
            self._plotTrackwayGauges(out, *data[1:])

        self.mergePdfs(self._paths, 'Gauges.pdf')

#_______________________________________________________________________________
    def _plotTrackwayGauges(self, points, color, unit, heading):

        histData = []
        for p in points:
            histData.append(p.yValue.value)

        plot = Histogram(
            data=histData,
            title='%s Trackway Gauges' % heading,
            xLabel='Averaged Trackway Gauge (%s)' % unit,
            yLabel='Frequency',
            color=color)
        self._paths.insert(0, plot.save(self.getTempFilePath(extension='pdf')))

        plot = ScatterPlot(
            data=ListUtils.sortObjectList(points, 'y', inPlace=True),
            title='%s Trackway Gauges' % heading,
            xLabel='Trackway Pes Count (#)',
            yLabel='Averaged Trackway Gauge (%s)' % unit,
            color=color)
        self._paths.insert(0, plot.save(self.getTempFilePath(extension='pdf')))