def shortFingerprint(self):
    """Compact track identifier: side (L/R) + limb (P/M) + track number.

    Hyphens in the number are encoded as 'N' to keep the result free of
    separator characters.
    """
    side = 'L' if getattr(self, TrackPropEnum.LEFT.name, False) else 'R'
    limb = 'P' if getattr(self, TrackPropEnum.PES.name, False) else 'M'
    number = StringUtils.toText(getattr(self, TrackPropEnum.NUMBER.name, '0'))
    return side + limb + number.replace('-', 'N')
def appendStatus(self, target, message, formatAsHtml =True):
    """Append a message to the application-level status widget and refresh the GUI.

    Does nothing unless the status widget is showing and currently bound to
    the given target. Carriage returns are always stripped from the message.
    """
    widget = self.getApplicationLevelWidget('status')
    if not widget.isShowing or widget.target != target:
        return

    text = StringUtils.toText(message).replace('\r', '')
    if formatAsHtml:
        text = '<br/>'.join(
            line.replace('\t', ' ') for line in text.strip().split('\n'))
    else:
        # NOTE(review): the non-HTML branch wraps the text in a <div> tag,
        # which looks inverted relative to the flag name -- confirm intent
        # before changing.
        text = '<div>%s</div>' % text

    widget.append(text)
    self.refreshGui()
def fingerprint(self):
    """ String created from the uniquely identifying track properties. """
    # NOTE(review): unlike trackwayFingerprint, this does NOT apply the
    # TCH-1000-2014-12-S-13BIS catalog remap -- confirm that is intentional.
    TPE = TrackPropEnum
    parts = [
        StringUtils.toText(getattr(self, enum.name, ''))
        for enum in (TPE.SITE, TPE.LEVEL, TPE.YEAR, TPE.SECTOR, TPE.TRACKWAY_TYPE)]
    parts.append(StringUtils.toText(getattr(self, TPE.TRACKWAY_NUMBER.name, '0')))
    parts.append('L' if getattr(self, TPE.LEFT.name, False) else 'R')
    parts.append('P' if getattr(self, TPE.PES.name, False) else 'M')
    parts.append(
        StringUtils.toText(getattr(self, TPE.NUMBER.name, '0')).replace('-', 'N'))
    return '-'.join(parts)
def traceLogMessage(cls, logMessage, callbacks =None, callbackTarget =None):
    """Print a log message (plus any stack trace) and fan it out to callbacks.

    :param logMessage: dict with a 'log' entry and an optional 'stack' entry
        that is appended to the log text when present.
    :param callbacks: optional iterable of callables invoked as
        cb(callbackTarget, out); failures in individual callbacks are ignored.
    :param callbackTarget: opaque value passed as the first callback argument.
    :return: the ASCII-converted log text.
    """
    log = logMessage['log']
    if 'stack' in logMessage:
        log += logMessage['stack']
    out = cls.asAscii(log)
    print(StringUtils.toText(out))

    # Previously the callbacks=None default was "handled" by letting the for
    # loop raise TypeError into a broad except that also swallowed real
    # errors. Guard explicitly instead; only individual callback failures
    # are suppressed (logging must never break on a bad listener).
    for cb in (callbacks or []):
        try:
            cb(callbackTarget, out)
        except Exception:
            pass
    return out
def traceLogMessage(cls, logMessage, callbacks=None, callbackTarget=None):
    """Print a log message (plus any stack trace) and fan it out to callbacks.

    :param logMessage: dict with a 'log' entry and an optional 'stack' entry
        that is appended to the log text when present.
    :param callbacks: optional iterable of callables invoked as
        cb(callbackTarget, out); failures in individual callbacks are ignored.
    :param callbackTarget: opaque value passed as the first callback argument.
    :return: the ASCII-converted log text.
    """
    log = logMessage['log']
    if 'stack' in logMessage:
        log += logMessage['stack']
    out = cls.asAscii(log)
    print(StringUtils.toText(out))

    # Previously the callbacks=None default was "handled" by letting the for
    # loop raise TypeError into a broad except that also swallowed real
    # errors. Guard explicitly instead; only individual callback failures
    # are suppressed (logging must never break on a bad listener).
    for cb in (callbacks or []):
        try:
            cb(callbackTarget, out)
        except Exception:
            pass
    return out
def trackwayFingerprint(self):
    """Unique identifier string for the trackway this track belongs to."""
    TPE = TrackPropEnum
    pieces = []
    for enum, default in (
            (TPE.SITE, ''),
            (TPE.LEVEL, ''),
            (TPE.YEAR, ''),
            (TPE.SECTOR, ''),
            (TPE.TRACKWAY_TYPE, ''),
            (TPE.TRACKWAY_NUMBER, '0')):
        pieces.append(StringUtils.toText(getattr(self, enum.name, default)))

    out = '-'.join(pieces)
    if out == 'TCH-1000-2014-12-S-13BIS':
        # Fix a naming ambiguity from the catalog
        return 'TCH-1000-2006-12-S-13'
    return out
def save(self):
    """Write the document's rows out to its CSV file.

    When removeIfSavedEmpty is set and there are no rows, the backing file is
    removed instead and None is returned. Otherwise writes a header (with an
    optional auto-index column prepended) followed by one row per entry and
    returns True. Exceptions raised while writing propagate to the caller.
    """
    if self.removeIfSavedEmpty and not self.rows:
        self.remove()
        return

    # Copy so that prepending the auto-index column does not mutate the list
    # owned by self.fieldNames (the previous code inserted into it in place).
    names = list(self.fieldNames)
    if self.autoIndexFieldName:
        names.insert(0, self.autoIndexFieldName)

    # NOTE(review): a latin-1 encoded copy of the names used to be built here
    # but was never passed to the writer; removed as dead code.

    if sys.version_info[0] < 3:
        args = dict(mode='w')
    else:
        args = dict(mode='w', encoding='utf-8')

    index = 0
    with open(self.path, **args) as f:
        writer = csv.DictWriter(f, fieldnames=names, dialect=csv.excel)
        writer.writeheader()
        for row in self.rows:
            result = dict()
            if self.autoIndexFieldName:
                index += 1
                result[self.autoIndexFieldName] = index
            for key, name in self._fields.items():
                value = row.get(key, '')
                if StringUtils.isBinaryType(value):
                    value = StringUtils.toText(value)
                result[name] = value
            writer.writerow(result)
    return True
def _refreshDatabaseDisplay(self):
    """Rebuild the revisions list for the currently selected database.

    Disables the revision controls when no databases are listed; otherwise
    enables them and repopulates the list from Alembic's revision history.
    """
    listWidget = self.revisionsListWidget
    listWidget.clear()

    hasDatabases = self.databasesListWidget.count() != 0
    listWidget.setEnabled(hasDatabases)
    self.createRevisionBtn.setEnabled(hasDatabases)
    self.initializeBtn.setEnabled(hasDatabases)
    if not hasDatabases:
        return

    revisions = AlembicUtils.getRevisionList(
        databaseUrl=self.currentDatabaseUrl,
        resourcesPath=self.currentAppResourcesPath)
    for rev in revisions:
        suffix = ' (HEAD)' if rev.is_head else ''
        DataListWidgetItem(
            StringUtils.toText(rev.revision) + suffix, listWidget, data=rev)
def _handleAddDatabase(self):
    """Prompt for a new database name and register it with the app config.

    Cancelling or submitting an empty name is a no-op. On success the new
    entry is stored under the current application, the display is refreshed,
    and the matching list item is selected.
    """
    result = PyGlassBasicDialogManager.openTextQuery(
        parent=self,
        header='Enter Database Name',
        message='Enter the name of the database as it would appear in the Database URL, e.g. '
            + '"activity" or "employees/artists"')
    if not result:
        return

    entry = {
        'id':TimeUtils.getUidTimecode('DATABASE', StringUtils.slugify(result)),
        'label':StringUtils.toText(result).title(),
        'name':result }

    apps = self.appConfig.get('APPLICATIONS')
    apps[self.currentAppID]['databases'][entry['id']] = entry
    self.appConfig.set('APPLICATIONS', apps)
    self._refreshAppDisplay()

    matches = self.databasesListWidget.findItems(result, QtCore.Qt.MatchExactly)
    if matches:
        matches[0].setSelected(True)
# NOTE(review): this sys.exit(1) appears to conclude a guard clause that
# begins before this chunk -- confirm its indentation against the full file.
sys.exit(1)

# Open master sessions on both the tracks and analysis databases so that
# removals can be applied and committed to each in tandem.
tracksModel = Tracks_Track.MASTER
tracksSession = tracksModel.createSession()
analysisModel = Analysis_Track.MASTER
analysisSession = analysisModel.createSession()

# Each row of the CSV is expected to carry a UID column identifying a track.
data = pd.read_csv(CSV_FILE)

def removeTrack(track):
    # Delete the track's analysis pair (when present) before the track itself.
    # NOTE(review): this helper is never called below -- removal goes through
    # Tracks_Track.removeTracksByUid instead; possibly dead code.
    analysisTrack = track.getAnalysisPair(analysisSession)
    if analysisTrack:
        analysisSession.delete(analysisTrack)
    tracksSession.delete(track)

# Remove every track whose UID appears in the CSV, reporting each deletion.
for index, row in data.iterrows():
    uid = StringUtils.toText(row.UID)
    tracks = Tracks_Track.removeTracksByUid(uid, tracksSession, analysisSession)
    for track in tracks:
        print('[REMOVED]: %s (%s)' % (track.fingerprint, track.uid))

# Persist the deletions in both databases.
tracksSession.commit()
analysisSession.commit()
print('Removal Operation Complete')
def modelsInit(cls, databaseUrl, initPath, initName):
    """Discover, import, and initialize every model module in a models package.

    Scans initPath[0] for model modules (supporting both plain directories and
    zip archives, e.g. frozen deployments), imports each one, registers the
    model class in the returned dict keyed by module name, and creates its
    database table if it does not already exist.

    :param databaseUrl: URL of the database the models bind to.
    :param initPath: package __path__ list; only the first entry is used.
    :param initName: dotted package name used as the import prefix.
    :return: dict mapping module name -> model class for all loaded models.
    """
    out = dict()

    # Decide whether the package lives on disk or inside a zip archive by
    # looking for the zip marker in the path.
    zipIndex = initPath[0].find(cls._ZIP_FIND)
    if zipIndex == -1:
        moduleList = os.listdir(initPath[0])
    else:
        # Split the path into the zip file location and the internal module
        # path, then collect matching entries from the archive listing.
        splitIndex = zipIndex + len(cls._ZIP_FIND)
        zipPath = initPath[0][: splitIndex - 1]
        modulePath = initPath[0][splitIndex:]
        z = zipfile.ZipFile(zipPath)
        moduleList = []
        for item in z.namelist():
            # Zip entries always use '/'; normalize to the host separator.
            item = os.sep.join(item.split("/"))
            if item.startswith(modulePath):
                moduleList.append(item.rsplit(os.sep, 1)[-1])

    # Warn if module initialization occurs before pyglass environment initialization and
    # then attempt to initialize the environment automatically to prevent errors
    if not PyGlassEnvironment.isInitialized:
        cls.logger.write(
            StringUtils.dedent(
                """
                [WARNING]: Database initialization called before PyGlassEnvironment
                initialization. Attempting automatic initialization to prevent errors."""
            )
        )
        PyGlassEnvironment.initializeFromInternalPath(initPath[0])

    if not cls.upgradeDatabase(databaseUrl):
        cls.logger.write(
            StringUtils.dedent(
                """
                [WARNING]: No alembic support detected. Migration support disabled."""
            )
        )

    items = []
    for module in moduleList:
        # Skip package init files and modules without an underscore in the
        # name (model modules follow a Prefix_Name convention).
        if module.startswith("__init__.py") or module.find("_") == -1:
            continue
        parts = module.rsplit(".", 1)
        parts[0] = parts[0].rsplit(os.sep, 1)[-1]
        # Only python sources, and only the first occurrence of each module
        # name (a .py and its .pyc would otherwise both be imported).
        if not parts[-1].startswith(StringUtils.toStr2("py")) or parts[0] in items:
            continue
        items.append(parts[0])

        # Pre-declare so the error report below can show partial progress.
        m = None  # fully qualified import name
        n = None  # module (and class) name
        r = None  # imported module object
        c = None  # model class
        try:
            n = module.rsplit(".", 1)[0]
            m = initName + "." + n
            r = __import__(m, locals(), globals(), [n])
            # The model class is expected to share its module's name.
            c = getattr(r, StringUtils.toText(n))
            out[n] = c
            # Create the backing table on first use.
            if not c.__table__.exists(c.ENGINE):
                c.__table__.create(c.ENGINE, True)
        except Exception as err:
            # NOTE(review): cls._logger here vs cls.logger above -- confirm
            # which attribute is correct; the inconsistency looks accidental.
            cls._logger.writeError(
                [
                    "MODEL INITIALIZATION FAILURE:",
                    "INIT PATH: %s" % initPath,
                    "INIT NAME: %s" % initName,
                    "MODULE IMPORT: %s" % m,
                    "IMPORT CLASS: %s" % n,
                    "IMPORT RESULT: %s" % r,
                    "CLASS RESULT: %s" % c,
                ],
                err,
            )
    return out
model = Tracks_Track.MASTER session = model.createSession() try: for index, row in data.iterrows(): # For each row in the source spreadsheet file, create a new track if no such track exists if row.site == 'FAKE': # Skip tracks marked with the site name FAKE as they represent file structure # formatting examples and not real tracks continue t = Tracks_Track() t.custom = True t.site = StringUtils.toText(row.site) t.sector = StringUtils.toText(row.sector) t.level = StringUtils.toText(row.level) t.trackwayNumber = StringUtils.toText(row.trackwayNumber) t.name = StringUtils.toText(row.trackwayName) t.trackwayType = 'S' t.year = '2014' t.index = -1 existing = t.findExistingTracks(session=session) if existing: # Do not create a track if the fingerprint for the new track matches one already found # in the database print('[WARNING]: Track "%s" already exists. Skipping this entry.') continue
def fromSpreadsheetEntry(self, csvRowData, session):
    """ From the spreadsheet data dictionary representing raw track data, this method creates
        a track entry in the database.

    :param csvRowData: dict of raw CSV values keyed by TrackCsvColumnEnum names.
    :param session: database session used to add/flush newly created tracks.
    :return: the created or matched existing track model, or False when the
        row is skipped or fails validation (errors go through _writeError). """

    #-------------------------------------------------------------------------------------------
    # MISSING
    #       Try to determine if the missing value has been set for this row data. If so and it
    #       has been marked missing, skip the track during import to prevent importing tracks
    #       with no data.
    try:
        missingValue = csvRowData[TrackCsvColumnEnum.MISSING.name].strip()
        if missingValue:
            return False
    except Exception:
        pass

    try:
        csvIndex = int(csvRowData[TrackCsvColumnEnum.INDEX.name])
    except Exception:
        self._writeError({
            'message':'Missing spreadsheet index',
            'data':csvRowData })
        return False

    model = Tracks_Track.MASTER
    t = model()
    t.importFlags = 0
    t.index = csvIndex

    #-------------------------------------------------------------------------------------------
    # SITE
    try:
        t.site = csvRowData.get(TrackCsvColumnEnum.TRACKSITE.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing track site',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # SECTOR
    try:
        t.sector = csvRowData.get(TrackCsvColumnEnum.SECTOR.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing sector',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # LEVEL
    try:
        t.level = csvRowData.get(TrackCsvColumnEnum.LEVEL.name)
    except Exception:
        self._writeError({
            'message':'Missing level',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # TRACKWAY
    #       Parse the trackway entry into type and number values. In the process illegal
    #       characters are removed to keep the format something that can be handled correctly
    #       within the database.
    try:
        test = csvRowData.get(TrackCsvColumnEnum.TRACKWAY.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing trackway',
            'data':csvRowData,
            'index':csvIndex })
        return False

    # If the trackway contains an ignore pattern then return without creating the track.
    # This is used for tracks in the record that are actually under-prints from a higher
    # level recorded in the spreadsheet only for catalog reference.
    testIndexes = [
        test.find(self._UNDERPRINT_IGNORE_TRACKWAY_STR),
        test.find(self._OVERPRINT_IGNORE_TRACKWAY_STR) ]
    testParensIndex = test.find('(')
    for testIndex in testIndexes:
        if testIndex != -1 and (testParensIndex == -1 or testParensIndex > testIndex):
            return False

    result = self._TRACKWAY_PATTERN.search(test)
    try:
        t.trackwayType = result.groupdict()['type'].upper().strip()
        t.trackwayNumber = result.groupdict()['number'].upper().strip()
    except Exception:
        self._writeError({
            'message':'Invalid trackway value: %s' % test,
            'data':csvRowData,
            'result':result,
            'match':result.groupdict() if result else 'N/A',
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # NAME
    #       Parse the name value into left, pes, and number attributes
    try:
        t.name = csvRowData.get(TrackCsvColumnEnum.TRACK_NAME.name).strip()
    except Exception:
        self._writeError({
            'message':'Missing track name',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # YEAR
    try:
        year = csvRowData.get(TrackCsvColumnEnum.MEASURED_DATE.name)

        if not year:
            # BUG FIX: default to the integer 2014 rather than the string '2014'. The
            # numeric comparisons below raise TypeError on strings under Python 3, which
            # previously sent every dateless row down the 'Missing cast date' error path
            # (the old code only worked via Python 2's str/int ordering).
            year = 2014
        else:
            try:
                y = StringUtils.toText(year).split(';')[-1].strip().replace(
                    '/', '_').replace(
                    ' ', '').replace(
                    '-', '_').split('_')[-1]
                year = int(re.compile('[^0-9]+').sub('', y))
            except Exception:
                year = 2014

        if year > 2999:
            # When multiple year entries combine into a single large number
            year = int(StringUtils.toUnicode(year)[-4:])
        elif year < 2000:
            # When two digit years (e.g. 09) are used instead of four digit years
            year += 2000

        t.year = StringUtils.toUnicode(year)
    except Exception:
        self._writeError({
            'message':'Missing cast date',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # FIND EXISTING
    #       Use data set above to attempt to load the track database entry
    fingerprint = t.fingerprint

    for uid, fp in DictUtils.iter(self.remainingTracks):
        # Remove the fingerprint from the list of fingerprints found in the database, which at
        # the end will leave only those fingerprints that exist in the database but were not
        # touched by the importer. These values can be used to identify tracks that should
        # have been "touched" but were not.
        if fp == fingerprint:
            del self.remainingTracks[uid]
            break

    existing = t.findExistingTracks(session)
    if existing and not isinstance(existing, Tracks_Track):
        existing = existing[0]

    if fingerprint in self.fingerprints:
        if not existing:
            existing = self.fingerprints[fingerprint]
        self._writeError({
            'message':'Ambiguous track entry "%s" [%s -> %s]' % (
                fingerprint, csvIndex, existing.index),
            'data':csvRowData,
            'existing':existing,
            'index':csvIndex })
        return False

    self.fingerprints[fingerprint] = t

    if existing:
        t = existing
    else:
        session.add(t)
        session.flush()

    TCCE = TrackCsvColumnEnum
    IFE = ImportFlagsEnum

    #-------------------------------------------------------------------------------------------
    # CSV PROPERTY CLEANUP
    #       Cleanup and format additional CSV values before saving the csv data to the track's
    #       snapshot.
    removeNonColumns = [
        TrackCsvColumnEnum.PRESERVED.name,
        TrackCsvColumnEnum.CAST.name,
        TrackCsvColumnEnum.OUTLINE_DRAWING.name ]
    for columnName in removeNonColumns:
        if columnName in csvRowData:
            testValue = StringUtils.toText(csvRowData[columnName]).strip().upper()
            if testValue.startswith('NON'):
                del csvRowData[columnName]

    # Create a snapshot that only includes a subset of properties that are flagged to be
    # included in the database snapshot entry
    snapshot = dict()
    for column in Reflection.getReflectionList(TrackCsvColumnEnum):
        # Include only values that are marked in the enumeration as to be included
        if not column.snapshot or column.name not in csvRowData:
            continue
        value = csvRowData.get(column.name)
        if value is None:
            continue
        # BUG FIX: was 'elif not value is StringUtils.isStringType(value)' -- an identity
        # test against a boolean that was effectively always true, followed by a second
        # redundant conversion. A single toText().strip() has the same net effect.
        value = StringUtils.toText(value).strip()
        # BUG FIX: decode takes a str encoding name; the old b'MacRoman' bytes argument
        # raises TypeError under Python 3.
        if value in ['-', b'\xd0'.decode('MacRoman')]:
            continue
        snapshot[column.name] = value

    #-------------------------------------------------------------------------------------------
    # WIDTH
    #       Parse the width into a numerical value and assign appropriate default uncertainty
    try:
        t.widthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_WIDTH, TCCE.PES_WIDTH_GUESS,
            TCCE.MANUS_WIDTH, TCCE.MANUS_WIDTH_GUESS, '0',
            IFE.HIGH_WIDTH_UNCERTAINTY, IFE.NO_WIDTH ))
        if not existing or t.widthUncertainty == 0:
            t.widthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_WIDTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('WIDTH PARSE ERROR:', err))
        self._writeError({
            'message':'Width parse error',
            'data':csvRowData,
            'error':err,
            'index':csvIndex })
        t.widthMeasured = 0.0
        if not existing:
            t.widthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # LENGTH
    #       Parse the length into a numerical value and assign appropriate default uncertainty
    try:
        t.lengthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_LENGTH, TCCE.PES_LENGTH_GUESS,
            TCCE.MANUS_LENGTH, TCCE.MANUS_LENGTH_GUESS, '0',
            IFE.HIGH_LENGTH_UNCERTAINTY, IFE.NO_LENGTH ))
        if not existing or t.lengthUncertainty == 0:
            t.lengthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_LENGTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('LENGTH PARSE ERROR:', err))
        self._writeError({
            'message':'Length parse error',
            'data':csvRowData,
            'error':err,
            'index':csvIndex })
        t.lengthMeasured = 0.0
        if not existing:
            t.lengthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # DEPTH
    #       Parse the depth into a numerical value and assign appropriate default uncertainty
    try:
        t.depthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_DEPTH, TCCE.PES_DEPTH_GUESS,
            TCCE.MANUS_DEPTH, TCCE.MANUS_DEPTH_GUESS, '0',
            IFE.HIGH_DEPTH_UNCERTAINTY, 0 ))
        if not existing or t.depthUncertainty == 0:
            t.depthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_DEPTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('DEPTH PARSE ERROR:', err))
        t.depthMeasured = 0.0
        if not existing:
            t.depthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # ROTATION
    #       Parse the rotation into a numerical value and assign appropriate default uncertainty
    try:
        t.rotationMeasured = float(self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_ROTATION, TCCE.LEFT_PES_ROTATION_GUESS,
            TCCE.RIGHT_PES_ROTATION, TCCE.RIGHT_PES_ROTATION_GUESS,
            TCCE.LEFT_MANUS_ROTATION, TCCE.LEFT_MANUS_ROTATION_GUESS,
            TCCE.RIGHT_MANUS_ROTATION, TCCE.RIGHT_MANUS_ROTATION_GUESS, '0',
            IFE.HIGH_ROTATION_UNCERTAINTY, 0 ))
        if not existing or t.rotationUncertainty == 0:
            # NOTE(review): 10.0 for HIGH uncertainty vs 45.0 otherwise looks inverted
            # relative to the width/length/depth pattern -- confirm before changing.
            t.rotationUncertainty = \
                10.0 if (t.importFlags & IFE.HIGH_ROTATION_UNCERTAINTY) else 45.0
    except Exception as err:
        print(Logger().echoError('ROTATION PARSE ERROR:', err))
        self._writeError({
            'message':'Rotation parse error',
            'error':err,
            'data':csvRowData,
            'index':csvIndex })
        t.rotationMeasured = 0.0
        if not existing:
            t.rotationUncertainty = 45.0

    #-------------------------------------------------------------------------------------------
    # STRIDE
    try:
        strideLength = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_STRIDE, TCCE.PES_STRIDE_GUESS,
            TCCE.MANUS_STRIDE, TCCE.MANUS_STRIDE_GUESS, None,
            IFE.HIGH_STRIDE_UNCERTAINTY )
        strideFactor = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_STRIDE_FACTOR, None,
            TCCE.MANUS_STRIDE_FACTOR, None, 1.0)
        if strideLength:
            snapshot[SnapshotDataEnum.STRIDE_LENGTH] = 0.01*float(strideLength)*float(strideFactor)
    except Exception as err:
        print(Logger().echoError('STRIDE PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # WIDTH ANGULATION PATTERN
    try:
        widthAngulation = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.WIDTH_PES_ANGULATION_PATTERN, TCCE.WIDTH_PES_ANGULATION_PATTERN_GUESS,
            TCCE.WIDTH_MANUS_ANGULATION_PATTERN, TCCE.WIDTH_MANUS_ANGULATION_PATTERN_GUESS, None,
            IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )
        if widthAngulation:
            snapshot[SnapshotDataEnum.WIDTH_ANGULATION_PATTERN] = 0.01*float(widthAngulation)
    except Exception as err:
        print(Logger().echoError('WIDTH ANGULATION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PACE
    try:
        pace = self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_PACE, TCCE.LEFT_PES_PACE_GUESS,
            TCCE.RIGHT_PES_PACE, TCCE.RIGHT_PES_PACE_GUESS,
            TCCE.LEFT_MANUS_PACE, TCCE.LEFT_MANUS_PACE_GUESS,
            TCCE.RIGHT_MANUS_PACE, TCCE.RIGHT_MANUS_PACE_GUESS, None,
            IFE.HIGH_PACE_UNCERTAINTY )
        if pace:
            snapshot[SnapshotDataEnum.PACE] = 0.01*float(pace)
    except Exception as err:
        print(Logger().echoError('PACE PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PACE ANGULATION PATTERN
    try:
        paceAngulation = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_PACE_ANGULATION, TCCE.PES_PACE_ANGULATION_GUESS,
            TCCE.MANUS_PACE_ANGULATION, TCCE.MANUS_PACE_ANGULATION_GUESS, None,
            IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )
        if paceAngulation:
            snapshot[SnapshotDataEnum.PACE_ANGULATION_PATTERN] = float(paceAngulation)
    except Exception as err:
        print(Logger().echoError('PACE ANGULATION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PROGRESSION
    try:
        progression = self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_PROGRESSION, TCCE.LEFT_PES_PROGRESSION_GUESS,
            TCCE.RIGHT_PES_PROGRESSION, TCCE.RIGHT_PES_PROGRESSION_GUESS,
            TCCE.LEFT_MANUS_PROGRESSION, TCCE.LEFT_MANUS_PROGRESSION_GUESS,
            TCCE.RIGHT_MANUS_PROGRESSION, TCCE.RIGHT_MANUS_PROGRESSION_GUESS, None,
            IFE.HIGH_PROGRESSION_UNCERTAINTY )
        if progression:
            snapshot[SnapshotDataEnum.PROGRESSION] = 0.01*float(progression)
    except Exception as err:
        print(Logger().echoError('PROGRESSION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # GLENO-ACETABULAR DISTANCE
    try:
        gad = self._collapseGuessProperty(
            t, csvRowData,
            TCCE.GLENO_ACETABULAR_DISTANCE, TCCE.GLENO_ACETABULAR_DISTANCE_GUESS, None,
            IFE.HIGH_GLENO_ACETABULAR_UNCERTAINTY )
        if gad:
            snapshot[SnapshotDataEnum.GLENO_ACETABULAR_LENGTH] = 0.01*float(gad)
    except Exception as err:
        print(Logger().echoError('GLENO-ACETABULAR DISTANCE PARSE ERROR:', err))

    # Save the snapshot (the previous try/except that only re-raised was redundant)
    t.snapshot = JSON.asString(snapshot)

    if TrackCsvColumnEnum.MEASURED_BY.name not in snapshot:
        # Mark entries that have no field measurements with a flag for future reference
        t.importFlags |= ImportFlagsEnum.NO_FIELD_MEASUREMENTS

    if existing:
        self.modified.append(t)
    else:
        self.created.append(t)

    return t
def __unicode__(self):
    """Unicode representation, delegated to the string representation."""
    rendered = self.__str__()
    return StringUtils.toText(rendered)
def prettyPrint(self):
    """Degrees value rounded to three significant figures, rendered as text."""
    rounded = NumericUtils.roundToSigFigs(self.degrees, 3)
    return StringUtils.toText(rounded)
# TRACK QUERY query = session.query(trackModel) if INDEXES: query = query.filter(trackModel.i.in_(INDEXES)) if UID_BEGINS: # OR together the UID_BEGINS using the startswith query modifier for each entry query = query.filter(sqla.or_(*[trackModel.uid.startswith(start) for start in UID_BEGINS])) if UIDS is None: UIDS = [] if CSV_FILE: # Loads all of the CSV files df = pd.read_csv(CSV_FILE) UIDS.extend([StringUtils.toText(item) for item in list(df.UID)]) if UIDS: query = query.filter(trackModel.uid.in_(UIDS)) tracks = query.all() if UIDS: ordered = [] missing = [] for uid_entry in UIDS: not_found = True for track in tracks: if track.uid == uid_entry: ordered.append(track) not_found = False