Example #1
    def _postAnalyze(self):
        """ Write the logs. """

        self._paths = []

        self.logger.write(
            '%s\nFRACTIONAL ERROR (Measured vs Entered)' % ('='*80))
        self._process('Error', 'wDev', 'lDev', self.trackDeviations)

        self.logger.write('%s\nFRACTIONAL UNCERTAINTY ERROR' % ('='*80))
        self._process(
            'Uncertainty Error', 'wDelta', 'lDelta', None, absoluteOnly=True)

        csv = CsvWriter(
            path=self.getPath('Length-Width-Deviations.csv'),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint'),
                ('wDelta', 'Width Deviation'),
                ('lDelta', 'Length Deviation') ])

        for entry in self.entries:
            track = entry['track']
            csv.createRow(
                uid=track.uid,
                fingerprint=track.fingerprint,
                wDelta=entry.get('wDelta', -1.0),
                lDelta=entry.get('lDelta', -1.0) )
        csv.save()

        self._processAspectRatios()

        self.mergePdfs(self._paths)
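
For readers without access to the CsvWriter class used throughout these examples, the Length-Width-Deviations export above can be sketched with Python's standard csv module. The column names mirror the fields declared in Example #1; the shape of entries (a list of dicts holding a 'track' object plus optional 'wDelta'/'lDelta' keys) is taken from the loop above, and everything else is illustrative only.

import csv

def write_deviation_csv(path, entries):
    # Minimal stand-in for the CsvWriter export above, using only the
    # standard library. Columns mirror the fields from Example #1.
    field_names = [
        'Index', 'UID', 'Fingerprint', 'Width Deviation', 'Length Deviation']
    with open(path, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=field_names)
        writer.writeheader()
        for index, entry in enumerate(entries):
            track = entry['track']
            writer.writerow({
                'Index': index,
                'UID': track.uid,
                'Fingerprint': track.fingerprint,
                'Width Deviation': entry.get('wDelta', -1.0),
                'Length Deviation': entry.get('lDelta', -1.0)})
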
Example #2
    def _postAnalyze(self):
        """Log gauge results, export the error and ignored track lists to
        CSV files, and plot the trackway gauges."""
        self.logger.write('%s gauge calculated tracks' % self._count)

        self._trackwayCsv.save()

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Errors.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._errorTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._errorTracks:
            self.logger.write(
                'Failed to calculate gauge for %s tracks'
                % len(self._errorTracks))

        csv = CsvWriter(
            path=self.getPath('Simple-Gauge-Ignores.csv', isFile=True),
            autoIndexFieldName='Index',
            fields=[
                ('uid', 'UID'),
                ('fingerprint', 'Fingerprint') ])
        for track in self._ignoreTracks:
            csv.createRow(uid=track.uid, fingerprint=track.fingerprint)
        csv.save()

        if self._ignoreTracks:
            self.logger.write(
                '%s tracks lacked suitability for gauge calculation'
                % len(self._ignoreTracks))

        plotData = [
            ('stride', 'red', 'AU', 'Stride-Normalized Weighted'),
            ('pace', 'green', 'AU', 'Pace-Normalized Weighted'),
            ('width', 'blue', 'AU', 'Width-Normalized Weighted'),
            ('abs', 'purple', 'm', 'Absolute Unweighted') ]

        for data in plotData:
            out = []
            source = ListUtils.sortListByIndex(
                source=getattr(self._trackwayGauges, StringUtils.toStr2(data[0])),
                index=0,
                inPlace=True)

            for item in source:
                out.append(PositionValue2D(
                    x=len(out),
                    y=item[1].value,
                    yUnc=item[1].uncertainty))
            self._plotTrackwayGauges(out, *data[1:])

        self.mergePdfs(self._paths, 'Gauges.pdf')
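
The sort-and-convert step inside the plotData loop can be expressed without the ListUtils and PositionValue2D helpers. The sketch below assumes, as the loop above suggests, that each gauge list holds (sort key, measurement) pairs whose measurement exposes value and uncertainty attributes; plain tuples stand in for PositionValue2D.

def gauge_plot_points(gauges):
    # Sort by the first element of each pair, then emit one
    # (x, y, y-uncertainty) point per measurement, with x as the running
    # index -- the same shape the plotting call above consumes.
    points = []
    for _, measurement in sorted(gauges, key=lambda pair: pair[0]):
        points.append(
            (len(points), measurement.value, measurement.uncertainty))
    return points
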
Example #3
    def _process(
            self, label, widthKey, lengthKey, trackDeviations,
            absoluteOnly=False):
        """Plot width and length deviation histograms for the given entry
        keys, log summary statistics, and export entries that deviate by
        more than two sigma to a CSV file."""
        pl  = self.plot
        ws  = []
        ls  = []
        w2D = []
        l2D = []

        for entry in self.entries:
            if widthKey in entry:
                ws.append(entry[widthKey])
                if lengthKey in entry:
                    w2D.append(entry[widthKey])

            if lengthKey in entry:
                ls.append(entry[lengthKey])
                if widthKey in entry:
                    l2D.append(entry[lengthKey])

        plotList = [
            ('widths', ws, 'Width', 'b'),
            ('lengths', ls, 'Length', 'r')]

        wRes = NumericUtils.getMeanAndDeviation(ws)
        self.logger.write('Width %ss' % wRes.label)
        lRes = NumericUtils.getMeanAndDeviation(ls)
        self.logger.write('Length %ss' % lRes.label)

        for data in plotList:
            if not absoluteOnly:
                d = data[1]
                self._paths.append(
                    self._makePlot(
                        label, d, data,
                        histRange=(-1.0, 1.0)))
                self._paths.append(
                    self._makePlot(
                        label, d, data,
                        isLog=True,
                        histRange=(-1.0, 1.0)))

            # noinspection PyUnresolvedReferences
            d = np.absolute(np.array(data[1]))
            self._paths.append(
                self._makePlot(
                    'Absolute ' + label, d, data,
                    histRange=(0.0, 1.0)))
            self._paths.append(
                self._makePlot(
                    'Absolute ' + label, d, data,
                    isLog=True,
                    histRange=(0.0, 1.0)))

        self.owner.createFigure('twoD')
        pl.hist2d(w2D, l2D, bins=20, range=([-1, 1], [-1, 1]))
        pl.title('2D %s Distribution' % label)
        pl.xlabel('Width %s' % label)
        pl.ylabel('Length %s' % label)
        pl.xlim(-1.0, 1.0)
        pl.ylim(-1.0, 1.0)
        path = self.getTempPath(
            '%s.pdf' % StringUtils.getRandomString(16),
            isFile=True)
        self.owner.saveFigure('twoD', path)
        self._paths.append(path)

        csv = CsvWriter()
        csv.path = self.getPath(
            '%s-Deviations.csv' % label.replace(' ', '-'),
            isFile=True)
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('wSigma', 'Width Deviation'),
            ('lSigma', 'Length Deviation') )

        count = 0
        for entry in self.entries:
            widthDevSigma  = NumericUtils.roundToOrder(
                abs(entry.get(widthKey, 0.0)/wRes.uncertainty), -2)
            lengthDevSigma = NumericUtils.roundToOrder(
                abs(entry.get(lengthKey, 0.0)/lRes.uncertainty), -1)
            if widthDevSigma > 2.0 or lengthDevSigma > 2.0:
                count += 1
                track = entry['track']
                data = dict(
                    wSigma=widthDevSigma,
                    lSigma=lengthDevSigma)

                if trackDeviations is not None:
                    trackDeviations[track.uid] = data

                csv.createRow(
                    uid=track.uid,
                    fingerprint=track.fingerprint,
                    **data)

        if not csv.save():
            self.logger.write(
                '[ERROR]: Failed to save CSV file to %s' % csv.path)

        percentage = NumericUtils.roundToOrder(
            100.0*float(count)/float(len(self.entries)), -2)
        self.logger.write('%s significant %ss (%s%%)' % (
            count, label.lower(), percentage))
        if percentage > (100.0 - 95.45):
            self.logger.write(
                '[WARNING]: Large deviation count exceeds ' +
                'normal distribution expectations.')
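
The 95.45 constant in the final check comes from the normal distribution: about 95.45% of samples fall within two standard deviations of the mean, so having more than roughly 4.55% of entries beyond the two-sigma cut suggests the deviations are not normally distributed. A quick check of that figure:

import math

# Fraction of a normal distribution within +/- 2 sigma of the mean is
# erf(2 / sqrt(2)) ~= 0.9545, so ~4.55% of samples are expected beyond
# the 2-sigma cut purely by chance.
within_two_sigma = math.erf(2.0 / math.sqrt(2.0))
print(round(100.0 * (1.0 - within_two_sigma), 2))  # 4.55
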
Example #4
def write_to_file(trackway, tracks_data):
    """
    @param trackway:
    @param tracks_data:
    @return:
    """

    csv = CsvWriter(
        path="{}.csv".format(trackway.name),
        autoIndexFieldName="Index",
        fields=[
            "lp_name",
            "lp_uid",
            "lp_x",
            "lp_dx",
            "lp_y",
            "lp_dy",
            "lp_w",
            "lp_dw",
            "lp_l",
            "lp_dl",
            "rp_name",
            "rp_uid",
            "rp_x",
            "rp_dx",
            "rp_y",
            "rp_dy",
            "rp_w",
            "rp_dw",
            "rp_l",
            "rp_dl",
            "lm_name",
            "lm_uid",
            "lm_x",
            "lm_dx",
            "lm_y",
            "lm_dy",
            "lm_w",
            "lm_dw",
            "lm_l",
            "lm_dl",
            "rm_name",
            "rm_uid",
            "rm_x",
            "rm_dx",
            "rm_y",
            "rm_dy",
            "rm_w",
            "rm_dw",
            "rm_l",
            "rm_dl",
        ],
    )

    count = max([len(ts) if ts else 0 for ts in tracks_data.values()])

    for i in range(count):
        entry = {}

        for key in ["lp", "rp", "lm", "rm"]:
            data = tracks_data[key][i] if i < len(tracks_data[key]) else None
            track = data["track"] if data else None

            entry.update(
                {
                    "{}_name".format(key): track.fingerprint if track else "",
                    "{}_uid".format(key): track.uid if track else "",
                }
            )

            point = track.positionValue if track else None
            entry.update(
                {
                    "{}_x".format(key): point.x if point else "",
                    "{}_dx".format(key): point.xUnc if point else "",
                    "{}_y".format(key): point.y if point else "",
                    "{}_dy".format(key): point.yUnc if point else "",
                }
            )

            length = track.lengthValue if track else None
            entry.update(
                {
                    "{}_l".format(key): length.value if length else "",
                    "{}_dl".format(key): length.uncertainty if length else "",
                }
            )

            width = track.widthValue if track else None
            entry.update(
                {
                    "{}_w".format(key): width.value if width else "",
                    "{}_dw".format(key): width.uncertainty if width else "",
                }
            )

        csv.createRow(**entry)

    csv.save()
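
write_to_file leaves the shape of its inputs implicit, so the hypothetical stand-ins below make it explicit. The attribute names mirror the ones the function reads (fingerprint, uid, positionValue, widthValue, lengthValue); the SimpleNamespace objects and sample values are assumptions for illustration only.

from types import SimpleNamespace

def _fake_track(name):
    # Stand-in exposing exactly the attributes write_to_file reads.
    return SimpleNamespace(
        fingerprint=name,
        uid='uid-%s' % name,
        positionValue=SimpleNamespace(x=1.0, xUnc=0.01, y=2.0, yUnc=0.01),
        widthValue=SimpleNamespace(value=0.25, uncertainty=0.005),
        lengthValue=SimpleNamespace(value=0.30, uncertainty=0.005))

# One list of entries per limb key; each entry wraps its track in a dict,
# matching the tracks_data[key][i]['track'] lookups above.
tracks_data = {
    key: [{'track': _fake_track('%s-1' % key)}]
    for key in ('lp', 'rp', 'lm', 'rm')}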