Example #1
from __future__ import print_function, absolute_import, unicode_literals, division

import os
import sys

import pandas as pd

from pyglass.app.PyGlassEnvironment import PyGlassEnvironment
PyGlassEnvironment.initializeFromInternalPath(__file__)

# Support-library import path assumed from the pyaid package used by cadence
from pyaid.string.StringUtils import StringUtils

from cadence.models.analysis.Analysis_Track import Analysis_Track
from cadence.models.tracks.Tracks_Track import Tracks_Track

# Placeholder path to the spreadsheet listing the UIDs of the tracks to remove
CSV_FILE = '/path/to/tracks-to-remove.csv'

# Assumed guard: abort if the source spreadsheet cannot be found
if not os.path.exists(CSV_FILE):
    print('[FATAL]: CSV file not found. Operation aborted.')
    sys.exit(1)

tracksModel = Tracks_Track.MASTER
tracksSession = tracksModel.createSession()

analysisModel = Analysis_Track.MASTER
analysisSession = analysisModel.createSession()

data = pd.read_csv(CSV_FILE)

# Helper that deletes a track along with its analysis pairing. Note that the loop
# below calls Tracks_Track.removeTracksByUid directly and does not use this helper.
def removeTrack(track):
    analysisTrack = track.getAnalysisPair(analysisSession)
    if analysisTrack:
        analysisSession.delete(analysisTrack)

    tracksSession.delete(track)

for index, row in data.iterrows():
    uid = StringUtils.toText(row.UID)
    tracks = Tracks_Track.removeTracksByUid(uid, tracksSession, analysisSession)
    for track in tracks:
        print('[REMOVED]: %s (%s)' % (track.fingerprint, track.uid))

tracksSession.commit()
analysisSession.commit()

print('Removal Operation Complete')
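
For reference, the script above only expects a UID column in its input spreadsheet
(it reads row.UID for each row). A minimal sketch that writes such a file with pandas,
using hypothetical UID values and a placeholder path:

import pandas as pd

# Hypothetical UIDs and a placeholder output path, purely for illustration
pd.DataFrame({'UID': ['track-1a2b3c', 'track-4d5e6f']}).to_csv(
    '/path/to/tracks-to-remove.csv', index=False)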



Example #2
    # TrackCsvImporter.read (method excerpt; Example #4 shows how it is called)
    def read(self, session, analysisSession, path =None, compressed =False):
        """ Reads from the spreadsheet located at the absolute path argument and adds each row
            to the tracks in the database. """

        if path is not None:
            self._path = path
        if self._path is None:
            return False

        model = Tracks_Track.MASTER
        for existingTrack in session.query(model).all():
            self.remainingTracks[existingTrack.uid] = existingTrack.fingerprint

        try:
            data = pd.read_csv(self._path)
        except Exception as err:
            self._writeError({
                'message':'ERROR: Unable to read CSV file "%s"' % self._path,
                'error':err })
            return False

        if data is None:
            self._writeError({
                'message':'ERROR: Failed to create CSV reader for file "%s"' % self._path })
            return False

        for index, row in data.iterrows():
            # Skip any rows that don't start with the proper numeric index value, which
            # includes the header row (if it exists) with the column names
            try:
                index = int(row[0])
            except Exception:
                continue

            rowDict = dict()
            for column in Reflection.getReflectionList(TrackCsvColumnEnum):
                value = row[column.index]

                if value and StringUtils.isStringType(value) and not StringUtils.isTextType(value):
                    # Try to decode the value into a unicode string using common codecs
                    for codec in ['utf8', 'MacRoman', 'utf16']:
                        try:
                            decodedValue = value.decode(codec)
                            if decodedValue:
                                value = decodedValue
                                break
                        except Exception:
                            continue

                try:
                    # Check to see if the value is NaN, and if it is replace it with an empty
                    # string to be ignored during import
                    value = '' if np.isnan(value) else value
                except Exception:
                    pass

                if value != '' or value is None:
                    rowDict[column.name] = value

            self.fromSpreadsheetEntry(rowDict, session)

        for uid, fingerprint in DictUtils.iter(self.remainingTracks):
            # Iterate through the list of remaining tracks, which are tracks not found by the
            # importer. If the track is marked as custom (meaning it is not managed by the importer)
            # it is ignored. Otherwise, the track is deleted from the database as a track that no
            # longer exists.

            track = Tracks_Track.MASTER.getByUid(uid, session)
            if track.custom:
                continue

            Tracks_Track.removeTrack(track, analysisSession)
            self._logger.write('[REMOVED]: No longer exists "%s" (%s)' % (
                track.fingerprint, track.uid))

        session.flush()

        for track in self.created:
            self._logger.write('[CREATED]: "%s" (%s)' % (track.fingerprint, track.uid))

        return True
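
The column loop in read() relies only on each entry of TrackCsvColumnEnum exposing an
index (the spreadsheet column position) and a name (the key stored in rowDict), and it
assumes Reflection.getReflectionList enumerates those entries from the enum class. A
minimal sketch of an enum with that shape, using hypothetical column names rather than
the real cadence definitions:

from collections import namedtuple

# Hypothetical stand-in for TrackCsvColumnEnum; each entry carries only the index and
# name attributes that read() accesses. The real enum defines many more columns.
ColumnDef = namedtuple('ColumnDef', ['index', 'name'])

class ExampleTrackCsvColumnEnum(object):
    UID   = ColumnDef(0, 'uid')
    SITE  = ColumnDef(1, 'site')
    LEVEL = ColumnDef(2, 'level')
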
Example #3
from __future__ import print_function, absolute_import, unicode_literals, division

import sys

import pandas as pd

from pyglass.app.PyGlassEnvironment import PyGlassEnvironment
PyGlassEnvironment.initializeFromInternalPath(__file__)

# Support-library import path assumed from the pyaid package used by cadence
from pyaid.string.StringUtils import StringUtils

from cadence.models.tracks.Tracks_Track import Tracks_Track

# Placeholder path to the spreadsheet of custom tracks to create
CSV_FILE = '/path/to/custom-tracks.csv'

# Assumed setup: read the source spreadsheet and abort if it cannot be parsed
try:
    data = pd.read_csv(CSV_FILE)
except Exception:
    data = None

if data is None:
    print('[FATAL]: Invalid csv file. Operation aborted.')
    sys.exit(2)

model = Tracks_Track.MASTER
session = model.createSession()

try:
    for index, row in data.iterrows():
        # For each row in the source spreadsheet file, create a new track if no such track exists

        if row.site == 'FAKE':
            # Skip tracks marked with the site name FAKE as they represent file structure
            # formatting examples and not real tracks
            continue

        t = Tracks_Track()
        t.custom = True

        t.site = StringUtils.toText(row.site)
        t.sector = StringUtils.toText(row.sector)
        t.level = StringUtils.toText(row.level)
        t.trackwayNumber = StringUtils.toText(row.trackwayNumber)
        t.name = StringUtils.toText(row.trackwayName)

        t.trackwayType = 'S'
        t.year = '2014'
        t.index = -1

        existing = t.findExistingTracks(session=session)
        if existing:
            # Do not create a track if the fingerprint for the new track matches one already found
            continue

        # Assumed completion: persist the newly created custom track
        session.add(t)
except Exception:
    # Assumed minimal error handling: roll back partial changes before re-raising
    session.rollback()
    raise

session.commit()
Example #4
# trackCsvImport
# (C)2015
# Scott Ernst

from __future__ import print_function, absolute_import, unicode_literals, division

from pyglass.app.PyGlassEnvironment import PyGlassEnvironment
PyGlassEnvironment.initializeFromInternalPath(__file__)

from cadence.data.TrackCsvImporter import TrackCsvImporter
from cadence.models.analysis.Analysis_Track import Analysis_Track
from cadence.models.tracks.Tracks_Track import Tracks_Track

PATH = '/Users/scott/Dropbox/a16/spreadsheets/BEB_S_500/BEB_500.csv'

session = Tracks_Track.createSession()
aSession = Analysis_Track.createSession()

importer = TrackCsvImporter(path=PATH)
importer.read(session, aSession)

for t in importer.created:
    print('[CREATED]: %s (%s)' % (t.fingerprint, t.uid))

print('%s Tracks Created' % len(importer.created))
print('%s Tracks Modified' % len(importer.modified))

session.commit()
aSession.commit()