Example #1
    def _preAnalyze(self):
        """_preDeviations doc..."""
        self.noData = 0
        self.entries = []
        self._paths = []

        self.initializeFolder(self.MAPS_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath('Pace-Length-Deviations.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('entered', 'Entered'),
            ('measured', 'Measured'),
            ('dev', 'Deviation'),
            ('delta', 'Fractional Error'),
            ('pairedFingerprint', 'Track Pair Fingerprint'),
            ('pairedUid', 'Track Pair UID') )
        self._csv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Pace-Match-Errors.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('measured', 'Measured') )
        self._errorCsv = csv
Example #2
    def _preAnalyze(self):
        self._tracks = []

        csv = CsvWriter()
        csv.path = self.getPath('Origin-Located.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint') )
        self._csv = csv
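
These _preAnalyze examples only configure the writers; rows are added later in the analysis (Example #8 below shows csv.addRow in use). As a minimal sketch of the remaining lifecycle, assuming the stage also defines _analyzeTrack and _postAnalyze hooks and that CsvWriter exposes a save() method that writes the buffered rows to csv.path (none of which is shown in these examples):

    def _analyzeTrack(self, track):
        # Row keys correspond to the field identifiers passed to addFields().
        self._csv.addRow(dict(
            uid=track.uid,
            fingerprint=track.fingerprint))

    def _postAnalyze(self):
        # Assumed flush step: persist the accumulated rows to disk at csv.path.
        self._csv.save()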
Example #3
    def _preAnalyze(self):
        self._uncs = []
        self._tracks = []

        self.initializeFolder(self.DRAWING_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath("Large-Rotational-Uncertainties.csv")
        csv.autoIndexFieldName = "Index"
        csv.addFields(("uid", "UID"), ("fingerprint", "Fingerprint"), ("rotation", "Rotation"))
        self._largeUncCsv = csv
Example #4
    def _preAnalyze(self):
        self._trackways = []
        self._densityPlots = dict()

        fields = [
            ('name', 'Name'),
            ('length', 'Length'),
            ('gauge', 'Gauge'),
            ('gaugeUnc', 'Gauge Uncertainty'),
            ('widthNormGauge', 'Width Normalized Gauge'),
            ('widthNormGaugeUnc', 'Width Normalized Gauge Uncertainty'),
            ('strideLength', 'Stride Length'),
            ('strideLengthUnc', 'Stride Length Uncertainty'),
            ('paceLength', 'Pace Length'),
            ('paceLengthUnc', 'Pace Length Uncertainty'),
            ('density', 'Density'),
            ('densityNorm', 'Normalized Density'),
            ('densityNormUnc', 'Normalized Density Uncertainty'),
            ('pesWidth', 'Pes Width'),
            ('pesWidthUnc', 'Pes Width Uncertainty'),
            ('pesLength', 'Pes Length'),
            ('pesLengthUnc', 'Pes Length Uncertainty'),
            ('manusWidth', 'Manus Width'),
            ('manusWidthUnc', 'Manus Width Uncertainty'),
            ('manusLength', 'Manus Length'),
            ('manusLengthUnc', 'Manus Length Uncertainty') ]

        csv = CsvWriter()
        csv.path = self.getPath(self.TRACKWAY_STATS_CSV)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(*fields)
        self._weightedStats = csv

        csv = CsvWriter()
        csv.path = self.getPath(self.UNWEIGHTED_TRACKWAY_STATS_CSV)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(*fields)
        self._unweightedStats = csv
Example #5
    def _preAnalyze(self):
        self._tracks = []

        csv = CsvWriter()
        csv.path = self.getPath('Track-Priority.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('priority', 'Priority'),
            ('preserved', 'Preserved'),
            ('cast', 'Cast'),
            ('outlined', 'Outlined') )
        self._csv = csv
Example #6
    def _preAnalyze(self):
        self._uncs   = []
        self._tracks = []

        self.initializeFolder(self.DRAWING_FOLDER_NAME)

        csv = CsvWriter()
        csv.path = self.getPath('Large-Spatial-Uncertainties.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('x', 'X'),
            ('z', 'Z') )
        self._largeUncCsv = csv
Example #7
    def _preAnalyze(self):
        """_preDeviations doc..."""
        self.noData = 0
        self.entries = []
        self._paths = []

        csv = CsvWriter()
        csv.path = self.getPath('Stride-Length-Deviations.csv', isFile=True)
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('entered', 'Entered'),
            ('measured', 'Measured'),
            ('dev', 'Deviation'),
            ('delta', 'Fractional Error'))
        self._csv = csv
Example #8
    def _addQuartileEntry(self, label, trackway, data):
        if not data or len(data) < 4:
            return

        if label not in self._quartileStats:
            csv = CsvWriter()
            csv.path = self.getPath(
                '%s-Quartiles.csv' % label.replace(' ', '-'),
                isFile=True)
            csv.autoIndexFieldName = 'Index'
            csv.addFields(
                ('name', 'Name'),

                ('normality', 'Normality'),
                ('unweightedNormality', 'Unweighted Normality'),

                ('unweightedLowerBound', 'Unweighted Lower Bound'),
                ('unweightedLowerQuart', 'Unweighted Lower Quartile'),
                ('unweightedMedian',     'Unweighted Median'),
                ('unweightedUpperQuart', 'Unweighted Upper Quartile'),
                ('unweightedUpperBound', 'Unweighted Upper Bound'),

                ('lowerBound', 'Lower Bound'),
                ('lowerQuart', 'Lower Quartile'),
                ('median',     'Median'),
                ('upperQuart', 'Upper Quartile'),
                ('upperBound', 'Upper Bound'),

                ('diffLowerBound', 'Diff Lower Bound'),
                ('diffLowerQuart', 'Diff Lower Quartile'),
                ('diffMedian',     'Diff Median'),
                ('diffUpperQuart', 'Diff Upper Quartile'),
                ('diffUpperBound', 'Diff Upper Bound') )
            self._quartileStats[label] = csv

        csv = self._quartileStats[label]
        dd = mstats.density.Distribution(data)
        unweighted = mstats.density.boundaries.unweighted_two(dd)
        weighted = mstats.density.boundaries.weighted_two(dd)

        #-----------------------------------------------------------------------
        # PLOT DENSITY
        #   Create a density plot for each value
        p = MultiScatterPlot(
            title='%s %s Density Distribution' % (trackway.name, label),
            xLabel=label,
            yLabel='Probability (AU)')

        x_values = mstats.density.ops.adaptive_range(dd, 10.0)
        y_values = dd.probabilities_at(x_values=x_values)

        p.addPlotSeries(
            line=True,
            markers=False,
            label='Weighted',
            color='blue',
            data=zip(x_values, y_values)
        )

        # Rebuild the distribution from the raw (unweighted) measurement
        # values so this series differs from the weighted one above.
        temp = mstats.density.create_distribution(
            dd.naked_measurement_values(raw=True)
        )
        x_values = mstats.density.ops.adaptive_range(temp, 10.0)
        y_values = temp.probabilities_at(x_values=x_values)

        p.addPlotSeries(
            line=True,
            markers=False,
            label='Unweighted',
            color='red',
            data=zip(x_values, y_values)
        )

        if label not in self._densityPlots:
            self._densityPlots[label] = []
        self._densityPlots[label].append(
            p.save(self.getTempFilePath(extension='pdf')))

        #-----------------------------------------------------------------------
        # NORMALITY
        #       Calculate the normality of the weighted and unweighted
        #       distributions as a test of how well each conforms to the
        #       Normal distribution derived from the unweighted data.
        #
        #       The unweighted normality test uses a basic bandwidth-detection
        #       algorithm to create a uniform Gaussian kernel that populates
        #       the DensityDistribution. It is effectively a kernel density
        #       estimate, but is aggressive in selecting the bandwidth to
        #       avoid over-smoothing multi-modal distributions.
        if len(data) < 8:
            normality = -1.0
            unweightedNormality = -1.0
        else:
            result = NumericUtils.getMeanAndDeviation(data)
            mean = result.raw
            std = result.rawUncertainty
            normality = mstats.density.ops.overlap(
                dd,
                mstats.density.create_distribution([mean], [std])
            )

            rawValues = [value.value for value in data]
            ddRaw = mstats.density.create_distribution(rawValues)
            unweightedNormality = mstats.density.ops.overlap(
                ddRaw,
                mstats.density.create_distribution([mean], [std])
            )

        # Prevent divide by zero
        unweighted = [
            0.00001 if NumericUtils.equivalent(x, 0) else x
            for x in unweighted
        ]

        csv.addRow({
            'index':trackway.index,
            'name':trackway.name,

            'normality':normality,
            'unweightedNormality':unweightedNormality,

            'unweightedLowerBound':unweighted[0],
            'unweightedLowerQuart':unweighted[1],
            'unweightedMedian'    :unweighted[2],
            'unweightedUpperQuart':unweighted[3],
            'unweightedUpperBound':unweighted[4],

            'lowerBound':weighted[0],
            'lowerQuart':weighted[1],
            'median'    :weighted[2],
            'upperQuart':weighted[3],
            'upperBound':weighted[4],

            'diffLowerBound':abs(unweighted[0] - weighted[0])/unweighted[0],
            'diffLowerQuart':abs(unweighted[1] - weighted[1])/unweighted[1],
            'diffMedian'    :abs(unweighted[2] - weighted[2])/unweighted[2],
            'diffUpperQuart':abs(unweighted[3] - weighted[3])/unweighted[3],
            'diffUpperBound':abs(unweighted[4] - weighted[4])/unweighted[4]
        })
Example #9
    def _preAnalyze(self):
        csv = CsvWriter()
        csv.path = self.getPath('Solo-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint') )
        self._soloTrackCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Corrupt-Track-Report.csv')
        csv.removeIfSavedEmpty = True
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('i', 'Database Index'),
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('reason', 'Reason'))
        self._badTrackCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Unprocessed-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('previous', 'Previous Track UID'),
            ('next', 'Next Track UID') )
        self._unprocessedCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Unknown-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('fingerprint', 'Fingerprint'),
            ('hidden', 'Hidden'),
            ('complete', 'Complete') )
        self._unknownCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Ignored-Track-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('uid', 'UID'),
            ('sitemap', 'Sitemap Name'),
            ('fingerprint', 'Fingerprint'),
            ('hidden', 'Hidden'),
            ('orphan', 'Orphaned') )
        self._orphanCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Sitemap-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('name', 'Sitemap Name'),
            ('unprocessed', 'Unprocessed'),
            ('ignores', 'Ignored'),
            ('count', 'Count'),
            ('incomplete', 'Incomplete'),
            ('completion', 'Completion (%)') )
        self._sitemapCsv = csv

        csv = CsvWriter()
        csv.path = self.getPath('Trackway-Report.csv')
        csv.autoIndexFieldName = 'Index'
        csv.addFields(
            ('name', 'Name'),
            ('leftPes', 'Left Pes'),
            ('rightPes', 'Right Pes'),
            ('leftManus', 'Left Manus'),
            ('rightManus', 'Right Manus'),
            ('incomplete', 'Incomplete'),
            ('total', 'Total'),
            ('ready', 'Analysis Ready'),
            ('complete', 'Completion (%)') )
        self._trackwayCsv = csv

        self._allTracks = dict()

        #-------------------------------------------------------------------------------------------
        # CREATE ALL TRACK LISTING
        #       This listing is used to find tracks that are not referenced by any
        #       sitemap relationship and therefore would never be loaded by the
        #       standard analysis methods.
        model = Tracks_Track.MASTER
        session = model.createSession()
        tracks = session.query(model).all()
        for t in tracks:
            self._checkTrackProperties(t, tracks)
            self._allTracks[t.uid] = dict(
                uid=t.uid,
                fingerprint=t.fingerprint,
                hidden=t.hidden,
                complete=t.isComplete)
        session.close()
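
The comment above explains that the all-track listing exists to catch tracks never referenced by a sitemap relationship. A minimal sketch of how that listing might be consumed afterward; the _analyzeTrack and _postAnalyze hooks and the exact pruning flow are assumptions, not shown in these examples:

    def _analyzeTrack(self, track):
        # Hypothetical: tracks reached through normal sitemap relationships
        # are removed from the listing as they are encountered.
        self._allTracks.pop(track.uid, None)

    def _postAnalyze(self):
        # Whatever remains was never referenced by a sitemap and is reported
        # through the Ignored-Track-Report writer configured above.
        for uid, info in self._allTracks.items():
            self._orphanCsv.addRow(dict(
                uid=uid,
                sitemap='',
                fingerprint=info['fingerprint'],
                hidden=info['hidden'],
                orphan=True))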