def prettyPrint(size, sizeEnum=None, precision=-1):
    """Formats a size (or a file/path) as a human-readable string like '12MB'.

    :param size: A numeric size, a file path string, or a readable file-like
        object. Paths and file-like objects are converted to a byte count via
        SizeConversion.getSizeOfFile.
    :param sizeEnum: Optional SIZES enum entry (mapping with a 'bytes' key)
        giving the units of the size argument; None means bytes.
    :param precision: Number of decimal digits to keep, or -1 for no limit.
    :return: Formatted string, '0B' for zero, or 'NaN' when no unit matched.
    """
    if hasattr(size, 'read') or isinstance(size, str):
        size = SizeConversion.getSizeOfFile(size)
    if size == 0:
        return '0B'

    sizeInBytes = float(size) if sizeEnum is None else float(size)*sizeEnum['bytes']
    sizeOptions = sorted(
        Reflection.getReflectionList(SIZES),
        key=itemgetter('bytes'))

    prev = None
    for opt in sizeOptions:
        # BUG FIX: compare the normalized byte count rather than the raw size
        # argument, so a non-None sizeEnum selects the correct output unit.
        if sizeInBytes < opt['bytes']:
            newSize = SizeConversion.convert(
                int(math.ceil(sizeInBytes)), SIZES.BYTES, prev)
            outSize = str(newSize)
            if precision != -1 and outSize.find('.') != -1:
                parts = outSize.split('.')
                outSize = parts[0] + '.' + parts[1][:precision]
            return outSize + (prev['id'] if prev is not None else 'B')
        prev = opt

    return 'NaN'
def runPythonExec(script, kwargs=None):
    """Executes the given python source inside a temporary module, wrapped in
    a single Maya undo chunk, and returns the script's result data.

    :param script: Python source code to execute.
    :param kwargs: Optional dict of inputs exposed to the script.
    :return: The result dict populated by the script, a NimbleResponseData
        failure payload on error, or False when the Maya undo chunk could not
        be opened or closed.
    """
    from nimble.NimbleEnvironment import NimbleEnvironment
    from nimble.data.NimbleResponseData import NimbleResponseData
    from nimble.data.enum.DataKindEnum import DataKindEnum

    try:
        nimble.cmds.undoInfo(openChunk=True)
    except Exception as err:
        return False

    try:
        # Create a new, temporary module in which to run the script
        module = imp.new_module('runExecTempModule')

        # Initialize the script with script inputs
        setattr(
            module,
            NimbleEnvironment.REMOTE_KWARGS_KEY,
            kwargs if kwargs is not None else dict())
        setattr(module, NimbleEnvironment.REMOTE_RESULT_KEY, dict())

        # Executes the script in the new module
        exec_(script, module.__dict__)

        # Find a NimbleScriptBase derived class definition and if it exists,
        # run it to populate the results.
        # FIX: items() instead of the Python-2-only iteritems() so this also
        # runs under Python 3 (identical behavior).
        for name, value in Reflection.getReflectionDict(module).items():
            if not inspect.isclass(value):
                continue
            if NimbleScriptBase in value.__bases__:
                getattr(module, name)().run()
                break

        # Retrieve the results object that contains all results set by the
        # execution of the script
        result = getattr(module, NimbleEnvironment.REMOTE_RESULT_KEY)
    except Exception as err:
        logger = Logger('runPythonExec', printOut=True)
        logger.writeError('ERROR: Failed Remote Script Execution', err)
        result = NimbleResponseData(
            kind=DataKindEnum.PYTHON_SCRIPT,
            response=NimbleResponseData.FAILED_RESPONSE,
            error=str(err))

    # If a result dictionary contains an error key format the response as a
    # failure
    try:
        errorMessage = ArgsUtils.extract(
            NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        if errorMessage:
            return NimbleResponseData(
                kind=DataKindEnum.PYTHON_SCRIPT,
                response=NimbleResponseData.FAILED_RESPONSE,
                error=errorMessage,
                payload=result)
    except Exception as err:
        pass

    try:
        nimble.cmds.undoInfo(closeChunk=True)
    except Exception as err:
        return False

    return result
def getAsTShirtEnum(cls, value, defaultEnum =u'none'):
    """Normalizes value (lower-cased, spaces stripped) and returns the
    normalized form when it appears in any GeneralSizeEnum alias list;
    otherwise returns defaultEnum."""
    normalized = value.lower().replace(u' ', u'')
    for entry in Reflection.getReflectionList(GeneralSizeEnum):
        aliases = entry[1]
        if normalized in aliases:
            return normalized
    return defaultEnum
def toMayaNodeDict(self):
    """ Creates a dictionary representation of those properties required for
        a Maya node. """
    TPE = TrackPropEnum
    out = dict()

    # Copy every property that maps to a Maya attribute
    for enum in Reflection.getReflectionList(TrackPropEnum):
        if enum.maya:
            out[enum.maya] = getattr(self, enum.name)

    # Load up the values of left, pes, and hidden so that they can be used
    # in assigning shaders
    for entry in (TPE.LEFT, TPE.PES, TPE.HIDDEN):
        out[entry.name] = getattr(self, entry.name)

    # If the width (or length) attribute is still zero, initialize it to the
    # measured value from the spreadsheet. If the measured value is itself
    # zero (usually due to poor quality track preservation), substitute a
    # nominal (and visually obvious) small value of 10 cm for UI display.
    for attrEnum, measuredEnum in (
            (TPE.WIDTH, TPE.WIDTH_MEASURED),
            (TPE.LENGTH, TPE.LENGTH_MEASURED)):
        if out[attrEnum.maya] == 0.0:
            measured = getattr(self, measuredEnum.name)
            out[attrEnum.maya] = 0.1 if measured == 0.0 else measured

    return out
def queryByReflection(question, reflectionSource, default=None):
    """Prompts the user with question, offering the names from the given
    reflection source as choices, and returns the data value matching the
    selected choice."""
    names, values = Reflection.getReflectionNameValueLists(reflectionSource)
    _, selectedValue = queryFromLargeList(
        question, names, values, default=default)
    return selectedValue
def echoImportFlags(self, separator =' | '):
    """Returns a bracketed, separator-joined list of the import flag names
    set on this instance, or '--' when none are set."""
    flagLookup = Reflection.getReflectionDict(ImportFlagsEnum)
    names = [
        key for key, value in DictUtils.iter(flagLookup)
        if value & self.importFlags]
    if not names:
        return '--'
    return '[%s]' % separator.join(names)
def echoAnalysisFlags(self, separator =' | '):
    """Returns a bracketed, separator-joined list of the analysis flag names
    set on this instance, or '--' when none are set."""
    flagLookup = Reflection.getReflectionDict(AnalysisFlagsEnum)
    names = [
        key for key, value in DictUtils.iter(flagLookup)
        if value & self.analysisFlags]
    if not names:
        return '--'
    return '[%s]' % separator.join(names)
def getTrackPropEnumByName(cls, name):
    """ Retrieves the TrackPropEnum enumerated value based on the name
        attribute, or None when no enumeration matches. """
    matches = [
        entry for entry in Reflection.getReflectionList(TrackPropEnum)
        if entry.name == name]
    return matches[0] if matches else None
def getTrackProps(cls, node):
    """Reads every Maya-backed track property from the specified node and
    returns a dictionary keyed by enumeration name."""
    return dict(
        (enum.name, cmds.getAttr(node + '.' + enum.maya))
        for enum in Reflection.getReflectionList(TrackPropEnum)
        if enum.maya is not None)
def setTrackProps(cls, node, props):
    """Writes each entry of props that maps to a Maya attribute onto the
    specified node, passing the type flag for string attributes."""
    for enum in Reflection.getReflectionList(TrackPropEnum):
        if not enum.maya or enum.maya not in props:
            continue
        attrPath = node + '.' + enum.maya
        value = props[enum.maya]
        if enum.type == 'string':
            # String attributes require an explicit type flag in Maya
            cmds.setAttr(attrPath, value, type=enum.type)
        else:
            cmds.setAttr(attrPath, value)
def toDict(self, uniqueOnly =False):
    """ Returns a dictionary containing the keys and current values of the
        track object with no dependency on a database session object. When
        uniqueOnly is True only unique-identifying properties are
        included. """
    out = dict(id=self.id, uid=self.uid)
    for enum in Reflection.getReflectionList(TrackPropEnum):
        if not uniqueOnly or enum.unique:
            out[enum.name] = getattr(self, enum.name)
    return self._createDict(**out)
def fromDict(self, data):
    """ Populates the track with the values specified by data dictionary
        argument. The keys of the data object should be valid names of the
        enumerated values in the TrackPropEnum class and the values valid
        entries for each key in the database class. This method can be used
        to load a track object from disk into a database model. """
    for enum in Reflection.getReflectionList(TrackPropEnum):
        # The unique identifier is never overwritten from external data
        if enum == TrackPropEnum.UID or enum.name not in data:
            continue
        setattr(self, enum.name, data[enum.name])
def findExistingTracks(self, session =None):
    """ Searches the database for existing tracks matching the current
        values of this instance's unique properties and returns the query
        results as a list. """
    activeSession = session if session else self.mySession
    model = self.__class__
    query = activeSession.query(model)
    uniqueEnums = (
        e for e in Reflection.getReflectionList(TrackPropEnum) if e.unique)
    for enum in uniqueEnums:
        query = query.filter(
            getattr(model, enum.name) == getattr(self, enum.name))
    return query.all()
def _getSiteValues(self, dataFiles, packages, includes):
    # Collects site-library definitions into the supplied lists and returns
    # a dict exposing those same list objects.
    #
    # NOTE: the lists are extended IN PLACE via the += statements below, so
    # the caller's list arguments (and the dict values, which alias them)
    # accumulate the entries — the early dict construction is intentional.
    out = {'dataFiles':dataFiles, 'packages':packages, 'includes':includes}
    definitions = Reflection.getReflectionList(LIBRARY_INCLUDES)
    for d in definitions:
        # Include common libraries unconditionally; others only when they
        # appear in this instance's includes list
        if d.id != SiteLibraryEnum.COMMON and d.id not in self._includes:
            continue
        if d.dataFiles:
            dataFiles += d.dataFiles
        if d.packages:
            packages += d.packages
        if d.includes:
            includes += d.includes
    return out
def _initializeNode(cls):
    """Creates and registers the Maya node attributes declared on the class
    and connects their affects relationships.

    FIX: iterates the reflection dict through DictUtils.iter — as the
    affects loop below already does — instead of the Python-2-only
    dict.iteritems(), making the method consistent and Python-3 safe.
    """
    attrs = cls.__nodeAttrDefs__
    for name, value in DictUtils.iter(Reflection.getReflectionDict(cls)):
        if isinstance(value, NodeAttribute):
            # Create the attribute from the definition
            value.initializeAttribute(cls, name)
            attrs[name] = value

            # Add attribute to node
            setattr(cls, name, value.attr)
            cls.addAttribute(getattr(cls, name))

    # Iterate through each attribute created and connect them according to
    # their affects
    for n, attrDef in DictUtils.iter(attrs):
        for target in attrDef.affects:
            cls.attributeAffects(attrDef.attr, getattr(cls, target))

    cls._initializeImpl(attrs)
def prettyPrint(size, sizeEnum =None, precision =-1):
    """Formats a size (or a file/path) as a human-readable string like '12MB'.

    :param size: A numeric size, a file path string, or a readable file-like
        object. Paths and file-like objects are converted to a byte count via
        SizeConversion.getSizeOfFile.
    :param sizeEnum: Optional SIZES enum entry (mapping with a 'bytes' key)
        giving the units of the size argument; None means bytes.
    :param precision: Number of decimal digits to keep, or -1 for no limit.
    :return: Formatted string, '0B' for zero, or 'NaN' when no unit matched.
    """
    if hasattr(size, 'read') or isinstance(size, str):
        size = SizeConversion.getSizeOfFile(size)
    if size == 0:
        return '0B'

    sizeInBytes = float(size) if sizeEnum is None else float(size)*sizeEnum['bytes']
    sizeOptions = sorted(
        Reflection.getReflectionList(SIZES),
        key=itemgetter('bytes'))

    prev = None
    for opt in sizeOptions:
        # BUG FIX: compare the normalized byte count rather than the raw size
        # argument, so a non-None sizeEnum selects the correct output unit.
        if sizeInBytes < opt['bytes']:
            newSize = SizeConversion.convert(
                int(math.ceil(sizeInBytes)), SIZES.BYTES, prev)
            outSize = str(newSize)
            if precision != -1 and outSize.find('.') != -1:
                parts = outSize.split('.')
                outSize = parts[0] + '.' + parts[1][:precision]
            return outSize + (prev['id'] if prev is not None else 'B')
        prev = opt

    return 'NaN'
def runPythonImport(cls, payload):
    """Imports and executes a python target described by the payload dict.

    The payload supports the keys 'module', 'method', 'class' and 'kwargs'.
    When neither method nor class is given, the final dotted element of the
    module path becomes the target; if that target contains a
    NimbleScriptBase subclass, that class is executed to produce the result.

    :param payload: Dictionary describing the remote execution target.
    :return: A NimbleResponseData reply with the execution result or
        failure information.
    """
    try:
        kwargs = payload.get('kwargs', {})
        targetModule = StringUtils.toStr2(payload.get('module'))
        targetMethod = StringUtils.toStr2(payload.get('method'))
        targetClass = StringUtils.toStr2(payload.get('class'))
        target = targetClass if targetClass is not None else targetMethod
        if target is None:
            # No explicit target: split the final dotted element off the
            # module path and use it as the import target
            parts = targetModule.rsplit('.', 1)
            targetModule = parts[0]
            target = parts[1]
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to parse python import payload',
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError('\n'.join([
                'ERROR: Failed to parse python import payload',
                'PAYLOAD: ' + DictUtils.prettyPrint(payload)]), err),
            response=NimbleResponseData.FAILED_RESPONSE)

    # Dynamically import the specified module and reload it to make sure any
    # changes have been updated
    try:
        module = __import__(
            StringUtils.toStr2(targetModule), globals(), locals(),
            [StringUtils.toStr2(target)] if target else [])
        reload(module)
        target = getattr(module, target)
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to import python target',
            'MODULE: %s' % targetModule,
            'TARGET: %s' % target,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(
                'Failed to import python module', err),
            response=NimbleResponseData.FAILED_RESPONSE)

    try:
        result = dict()
        if targetClass is not None:
            # Instantiate the class and invoke the named method, or call the
            # instance itself when no method was specified
            tc = target()
            result = getattr(tc, targetMethod)(**kwargs) \
                if targetMethod else \
                tc(**kwargs)
        elif targetMethod is not None:
            result = target(**kwargs)
        else:
            # Find a NimbleScriptBase derived class definition and if it
            # exists, run it to populate the results
            for name, value in DictUtils.iter(
                    Reflection.getReflectionDict(target)):
                if not inspect.isclass(value):
                    continue
                if NimbleScriptBase in value.__bases__:
                    result = getattr(target, name)()(**kwargs)
                    # BUG FIX: stop at the first matching script class. The
                    # original set an unused 'found' flag and kept looping,
                    # allowing a later match to overwrite the result (the
                    # sibling runPythonExec breaks here as well).
                    break

        # If a result dictionary contains an error key format the response
        # as a failure
        errorMessage = None
        try:
            errorMessage = ArgsUtils.extract(
                NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        except Exception as err:
            pass
        return cls.createReply(
            DataKindEnum.PYTHON_IMPORT, result, errorMessage=errorMessage)
    except Exception as err:
        msg = 'ERROR: Failed to execute remote script'
        NimbleEnvironment.logError([
            msg,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload),
            'TARGET: ' + str(target)], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(msg, err),
            response=NimbleResponseData.FAILED_RESPONSE)
def read(self, session, analysisSession, path =None, compressed =False):
    """ Reads from the spreadsheet located at the absolute path argument and
        adds each row to the tracks in the database. Returns False when no
        path is available, True on completion, and None on read errors. """
    if path is not None:
        self._path = path
    if self._path is None:
        return False

    model = Tracks_Track.MASTER
    # Record every track currently in the database; entries still present in
    # remainingTracks after the import loop are candidates for removal below.
    for existingTrack in session.query(model).all():
        self.remainingTracks[existingTrack.uid] = existingTrack.fingerprint

    try:
        data = pd.read_csv(self._path)
    except Exception as err:
        self._writeError({
            'message':'ERROR: Unable to read CSV file "%s"' % self._path,
            'error':err })
        return

    if data is None:
        self._writeError({
            'message':'ERROR: Failed to create CSV reader for file "%s"' % self._path })
        return

    for index, row in data.iterrows():
        # Skip any rows that don't start with the proper numeric index value, which
        # includes the header row (if it exists) with the column names
        try:
            index = int(row[0])
        except Exception:
            continue

        rowDict = dict()
        for column in Reflection.getReflectionList(TrackCsvColumnEnum):
            value = row[column.index]

            if value and StringUtils.isStringType(value) and not StringUtils.isTextType(value):
                # Try to decode the value into a unicode string using common codecs
                for codec in ['utf8', 'MacRoman', 'utf16']:
                    try:
                        decodedValue = value.decode(codec)
                        if decodedValue:
                            value = decodedValue
                            break
                    except Exception:
                        continue

            try:
                # Check to see if the value is NaN, and if it is replace it with an empty
                # string to be ignored during import
                value = '' if np.isnan(value) else value
            except Exception:
                pass

            if value != '' or value is None:
                rowDict[column.name] = value

        self.fromSpreadsheetEntry(rowDict, session)

    for uid, fingerprint in DictUtils.iter(self.remainingTracks):
        # Iterate through the list of remaining tracks, which are tracks not found by the
        # importer. If the track is marked as custom (meaning it is not managed by the importer)
        # it is ignored. Otherwise, the track is deleted from the database as a track that no
        # longer exists.
        track = Tracks_Track.MASTER.getByUid(uid, session)
        if track.custom:
            continue
        Tracks_Track.removeTrack(track, analysisSession)
        self._logger.write('[REMOVED]: No longer exists "%s" (%s)' % (
            track.fingerprint, track.uid))

    session.flush()

    for track in self.created:
        self._logger.write('[CREATED]: "%s" (%s)' % (track.fingerprint, track.uid))

    return True
def fromSpreadsheetEntry(self, csvRowData, session):
    """ From the spreadsheet data dictionary representing raw track data, this method creates
        a track entry in the database.

        Returns the track model instance on success, or False when the row is
        skipped or fails validation. """

    #-------------------------------------------------------------------------------------------
    # MISSING
    #       Try to determine if the missing value has been set for this row data. If so and it
    #       has been marked missing, skip the track during import to prevent importing tracks
    #       with no data.
    try:
        missingValue = csvRowData[TrackCsvColumnEnum.MISSING.name].strip()
        if missingValue:
            return False
    except Exception:
        pass

    try:
        csvIndex = int(csvRowData[TrackCsvColumnEnum.INDEX.name])
    except Exception:
        self._writeError({
            'message':'Missing spreadsheet index',
            'data':csvRowData })
        return False

    model = Tracks_Track.MASTER
    t = model()
    t.importFlags = 0
    t.index = csvIndex

    #-------------------------------------------------------------------------------------------
    # SITE
    try:
        t.site = csvRowData.get(TrackCsvColumnEnum.TRACKSITE.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing track site',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # SECTOR
    try:
        t.sector = csvRowData.get(TrackCsvColumnEnum.SECTOR.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing sector',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # LEVEL
    try:
        t.level = csvRowData.get(TrackCsvColumnEnum.LEVEL.name)
    except Exception:
        self._writeError({
            'message':'Missing level',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # TRACKWAY
    #       Parse the trackway entry into type and number values. In the process illegal
    #       characters are removed to keep the format something that can be handled correctly
    #       within the database.
    try:
        test = csvRowData.get(TrackCsvColumnEnum.TRACKWAY.name).strip().upper()
    except Exception:
        self._writeError({
            'message':'Missing trackway',
            'data':csvRowData,
            'index':csvIndex })
        return False

    # If the trackway contains an ignore pattern then return without creating the track.
    # This is used for tracks in the record that are actually under-prints from a higher
    # level recorded in the spreadsheet only for catalog reference.
    testIndexes = [
        test.find(self._UNDERPRINT_IGNORE_TRACKWAY_STR),
        test.find(self._OVERPRINT_IGNORE_TRACKWAY_STR) ]
    testParensIndex = test.find('(')
    for testIndex in testIndexes:
        if testIndex != -1 and (testParensIndex == -1 or testParensIndex > testIndex):
            return False

    result = self._TRACKWAY_PATTERN.search(test)
    try:
        t.trackwayType = result.groupdict()['type'].upper().strip()
        t.trackwayNumber = result.groupdict()['number'].upper().strip()
    except Exception:
        self._writeError({
            'message':'Invalid trackway value: %s' % test,
            'data':csvRowData,
            'result':result,
            'match':result.groupdict() if result else 'N/A',
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # NAME
    #       Parse the name value into left, pes, and number attributes
    try:
        t.name = csvRowData.get(TrackCsvColumnEnum.TRACK_NAME.name).strip()
    except Exception:
        self._writeError({
            'message':'Missing track name',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # YEAR
    try:
        year = csvRowData.get(TrackCsvColumnEnum.MEASURED_DATE.name)
        if not year:
            year = '2014'
        else:
            try:
                y = StringUtils.toText(year).split(';')[-1].strip().replace(
                    '/', '_').replace(
                    ' ', '').replace(
                    '-', '_').split('_')[-1]
                year = int(re.compile('[^0-9]+').sub('', y))
            except Exception:
                year = 2014

            if year > 2999:
                # When multiple year entries combine into a single large number
                year = int(StringUtils.toUnicode(year)[-4:])
            elif year < 2000:
                # When two digit years (e.g. 09) are used instead of four digit years
                year += 2000
            year = StringUtils.toUnicode(year)
        t.year = year
    except Exception:
        self._writeError({
            'message':'Missing cast date',
            'data':csvRowData,
            'index':csvIndex })
        return False

    #-------------------------------------------------------------------------------------------
    # FIND EXISTING
    #       Use data set above to attempt to load the track database entry
    fingerprint = t.fingerprint
    for uid, fp in DictUtils.iter(self.remainingTracks):
        # Remove the fingerprint from the list of fingerprints found in the database, which at
        # the end will leave only those fingerprints that exist in the database but were not
        # touched by the importer. These values can be used to identify tracks that should
        # have been "touched" but were not.
        if fp == fingerprint:
            del self.remainingTracks[uid]
            break

    existing = t.findExistingTracks(session)
    if existing and not isinstance(existing, Tracks_Track):
        existing = existing[0]

    if fingerprint in self.fingerprints:
        if not existing:
            existing = self.fingerprints[fingerprint]
        self._writeError({
            'message':'Ambiguous track entry "%s" [%s -> %s]' % (
                fingerprint, csvIndex, existing.index),
            'data':csvRowData,
            'existing':existing,
            'index':csvIndex })
        return False

    self.fingerprints[fingerprint] = t

    if existing:
        t = existing
    else:
        session.add(t)
        session.flush()

    TCCE = TrackCsvColumnEnum
    IFE = ImportFlagsEnum

    #-------------------------------------------------------------------------------------------
    # CSV PROPERTY CLEANUP
    #       Cleanup and format additional CSV values before saving the csv data to the track's
    #       snapshot.
    removeNonColumns = [
        TrackCsvColumnEnum.PRESERVED.name,
        TrackCsvColumnEnum.CAST.name,
        TrackCsvColumnEnum.OUTLINE_DRAWING.name]
    for columnName in removeNonColumns:
        if columnName in csvRowData:
            testValue = StringUtils.toText(csvRowData[columnName]).strip().upper()
            if testValue.startswith('NON'):
                del csvRowData[columnName]

    # Create a snapshot that only includes a subset of properties that are flagged to be
    # included in the database snapshot entry
    snapshot = dict()
    for column in Reflection.getReflectionList(TrackCsvColumnEnum):
        # Include only values that are marked in the enumeration as to be included
        if not column.snapshot or column.name not in csvRowData:
            continue
        value = csvRowData.get(column.name)
        if value is None:
            continue
        elif not StringUtils.isStringType(value):
            # BUG FIX: was 'not value is StringUtils.isStringType(value)',
            # an identity comparison against a bool that was effectively
            # always true. The intent is to textify non-string values.
            value = StringUtils.toText(value)

        value = StringUtils.toText(value).strip()
        # BUG FIX: codec names must be str, not bytes (fails on Python 3)
        if value in ['-', b'\xd0'.decode('MacRoman')]:
            continue
        snapshot[column.name] = value

    #-------------------------------------------------------------------------------------------
    # WIDTH
    #       Parse the width into a numerical value and assign appropriate default uncertainty
    try:
        t.widthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_WIDTH, TCCE.PES_WIDTH_GUESS,
            TCCE.MANUS_WIDTH, TCCE.MANUS_WIDTH_GUESS, '0',
            IFE.HIGH_WIDTH_UNCERTAINTY, IFE.NO_WIDTH ))
        # (removed a no-op 't.widthMeasured = t.widthMeasured' self-assignment)
        if not existing or t.widthUncertainty == 0:
            t.widthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_WIDTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('WIDTH PARSE ERROR:', err))
        self._writeError({
            'message':'Width parse error',
            'data':csvRowData,
            'error':err,
            'index':csvIndex })
        t.widthMeasured = 0.0
        if not existing:
            t.widthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # LENGTH
    #       Parse the length into a numerical value and assign appropriate default uncertainty
    try:
        t.lengthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_LENGTH, TCCE.PES_LENGTH_GUESS,
            TCCE.MANUS_LENGTH, TCCE.MANUS_LENGTH_GUESS, '0',
            IFE.HIGH_LENGTH_UNCERTAINTY, IFE.NO_LENGTH ))
        # (removed a no-op 't.lengthMeasured = t.lengthMeasured' self-assignment)
        if not existing or t.lengthUncertainty == 0:
            t.lengthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_LENGTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('LENGTH PARSE ERROR:', err))
        self._writeError({
            'message':'Length parse error',
            'data':csvRowData,
            'error':err,
            'index':csvIndex })
        t.lengthMeasured = 0.0
        if not existing:
            t.lengthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # DEPTH
    #       Parse the depth into a numerical value and assign appropriate default uncertainty
    try:
        t.depthMeasured = 0.01*float(self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_DEPTH, TCCE.PES_DEPTH_GUESS,
            TCCE.MANUS_DEPTH, TCCE.MANUS_DEPTH_GUESS, '0',
            IFE.HIGH_DEPTH_UNCERTAINTY, 0 ))
        if not existing or t.depthUncertainty == 0:
            t.depthUncertainty = 0.05 if (t.importFlags & IFE.HIGH_DEPTH_UNCERTAINTY) else 0.03
    except Exception as err:
        print(Logger().echoError('DEPTH PARSE ERROR:', err))
        t.depthMeasured = 0.0
        if not existing:
            t.depthUncertainty = 0.05

    #-------------------------------------------------------------------------------------------
    # ROTATION
    #       Parse the rotation into a numerical value and assign appropriate default uncertainty
    try:
        t.rotationMeasured = float(self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_ROTATION, TCCE.LEFT_PES_ROTATION_GUESS,
            TCCE.RIGHT_PES_ROTATION, TCCE.RIGHT_PES_ROTATION_GUESS,
            TCCE.LEFT_MANUS_ROTATION, TCCE.LEFT_MANUS_ROTATION_GUESS,
            TCCE.RIGHT_MANUS_ROTATION, TCCE.RIGHT_MANUS_ROTATION_GUESS, '0',
            IFE.HIGH_ROTATION_UNCERTAINTY, 0 ))
        if not existing or t.rotationUncertainty == 0:
            # NOTE(review): the HIGH flag maps to the SMALLER uncertainty
            # (10.0 vs 45.0), which is the reverse of the width/length/depth
            # pattern above — confirm this is intentional.
            t.rotationUncertainty = \
                10.0 if (t.importFlags & IFE.HIGH_ROTATION_UNCERTAINTY) else 45.0
    except Exception as err:
        print(Logger().echoError('ROTATION PARSE ERROR:', err))
        self._writeError({
            'message':'Rotation parse error',
            'error':err,
            'data':csvRowData,
            'index':csvIndex })
        t.rotationMeasured = 0.0
        if not existing:
            t.rotationUncertainty = 45.0

    #-------------------------------------------------------------------------------------------
    # STRIDE
    try:
        strideLength = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_STRIDE, TCCE.PES_STRIDE_GUESS,
            TCCE.MANUS_STRIDE, TCCE.MANUS_STRIDE_GUESS, None,
            IFE.HIGH_STRIDE_UNCERTAINTY )
        strideFactor = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_STRIDE_FACTOR, None,
            TCCE.MANUS_STRIDE_FACTOR, None, 1.0)
        if strideLength:
            snapshot[SnapshotDataEnum.STRIDE_LENGTH] = 0.01*float(strideLength)*float(strideFactor)
    except Exception as err:
        print(Logger().echoError('STRIDE PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # WIDTH ANGULATION PATTERN
    try:
        widthAngulation = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.WIDTH_PES_ANGULATION_PATTERN, TCCE.WIDTH_PES_ANGULATION_PATTERN_GUESS,
            TCCE.WIDTH_MANUS_ANGULATION_PATTERN, TCCE.WIDTH_MANUS_ANGULATION_PATTERN_GUESS, None,
            IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )
        if widthAngulation:
            snapshot[SnapshotDataEnum.WIDTH_ANGULATION_PATTERN] = 0.01*float(widthAngulation)
    except Exception as err:
        print(Logger().echoError('WIDTH ANGULATION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PACE
    try:
        pace = self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_PACE, TCCE.LEFT_PES_PACE_GUESS,
            TCCE.RIGHT_PES_PACE, TCCE.RIGHT_PES_PACE_GUESS,
            TCCE.LEFT_MANUS_PACE, TCCE.LEFT_MANUS_PACE_GUESS,
            TCCE.RIGHT_MANUS_PACE, TCCE.RIGHT_MANUS_PACE_GUESS, None,
            IFE.HIGH_PACE_UNCERTAINTY )
        if pace:
            snapshot[SnapshotDataEnum.PACE] = 0.01*float(pace)
    except Exception as err:
        print(Logger().echoError('PACE PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PACE ANGULATION PATTERN
    try:
        paceAngulation = self._collapseManusPesProperty(
            t, csvRowData,
            TCCE.PES_PACE_ANGULATION, TCCE.PES_PACE_ANGULATION_GUESS,
            TCCE.MANUS_PACE_ANGULATION, TCCE.MANUS_PACE_ANGULATION_GUESS, None,
            IFE.HIGH_WIDTH_ANGULATION_UNCERTAINTY )
        if paceAngulation:
            snapshot[SnapshotDataEnum.PACE_ANGULATION_PATTERN] = float(paceAngulation)
    except Exception as err:
        print(Logger().echoError('PACE ANGULATION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # PROGRESSION
    try:
        progression = self._collapseLimbProperty(
            t, csvRowData,
            TCCE.LEFT_PES_PROGRESSION, TCCE.LEFT_PES_PROGRESSION_GUESS,
            TCCE.RIGHT_PES_PROGRESSION, TCCE.RIGHT_PES_PROGRESSION_GUESS,
            TCCE.LEFT_MANUS_PROGRESSION, TCCE.LEFT_MANUS_PROGRESSION_GUESS,
            TCCE.RIGHT_MANUS_PROGRESSION, TCCE.RIGHT_MANUS_PROGRESSION_GUESS, None,
            IFE.HIGH_PROGRESSION_UNCERTAINTY )
        if progression:
            snapshot[SnapshotDataEnum.PROGRESSION] = 0.01*float(progression)
    except Exception as err:
        print(Logger().echoError('PROGRESSION PARSE ERROR:', err))

    #-------------------------------------------------------------------------------------------
    # GLENO-ACETABULAR DISTANCE
    try:
        gad = self._collapseGuessProperty(
            t, csvRowData,
            TCCE.GLENO_ACETABULAR_DISTANCE, TCCE.GLENO_ACETABULAR_DISTANCE_GUESS, None,
            IFE.HIGH_GLENO_ACETABULAR_UNCERTAINTY )
        if gad:
            snapshot[SnapshotDataEnum.GLENO_ACETABULAR_LENGTH] = 0.01*float(gad)
    except Exception as err:
        print(Logger().echoError('GLENO-ACETABULAR DISTANCE PARSE ERROR:', err))

    # Save the snapshot
    try:
        t.snapshot = JSON.asString(snapshot)
    except Exception:
        raise

    if TrackCsvColumnEnum.MEASURED_BY.name not in snapshot:
        # Mark entries that have no field measurements with a flag for future reference
        t.importFlags |= ImportFlagsEnum.NO_FIELD_MEASUREMENTS

    if existing:
        self.modified.append(t)
    else:
        self.created.append(t)

    return t
def runPythonImport(cls, payload):
    """Imports and executes a python target described by the payload dict.

    The payload supports the keys 'module', 'method', 'class' and 'kwargs'.
    When neither method nor class is given, the final dotted element of the
    module path becomes the target; if that target contains a
    NimbleScriptBase subclass, that class is executed to produce the result.

    :param payload: Dictionary describing the remote execution target.
    :return: A NimbleResponseData reply with the execution result or
        failure information.
    """
    try:
        kwargs = payload.get('kwargs', {})
        targetModule = StringUtils.toStr2(payload.get('module'))
        targetMethod = StringUtils.toStr2(payload.get('method'))
        targetClass = StringUtils.toStr2(payload.get('class'))
        target = targetClass if targetClass is not None else targetMethod
        if target is None:
            # No explicit target: split the final dotted element off the
            # module path and use it as the import target
            parts = targetModule.rsplit('.', 1)
            targetModule = parts[0]
            target = parts[1]
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to parse python import payload',
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(
                '\n'.join([
                    'ERROR: Failed to parse python import payload',
                    'PAYLOAD: ' + DictUtils.prettyPrint(payload)
                ]), err),
            response=NimbleResponseData.FAILED_RESPONSE)

    # Dynamically import the specified module and reload it to make sure any
    # changes have been updated
    try:
        module = __import__(
            StringUtils.toStr2(targetModule), globals(), locals(),
            [StringUtils.toStr2(target)] if target else [])
        reload(module)
        target = getattr(module, target)
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to import python target',
            'MODULE: %s' % targetModule,
            'TARGET: %s' % target,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError('Failed to import python module', err),
            response=NimbleResponseData.FAILED_RESPONSE)

    try:
        result = dict()
        if targetClass is not None:
            # Instantiate the class and invoke the named method, or call the
            # instance itself when no method was specified
            tc = target()
            result = getattr(tc, targetMethod)(**kwargs) \
                if targetMethod else \
                tc(**kwargs)
        elif targetMethod is not None:
            result = target(**kwargs)
        else:
            # Find a NimbleScriptBase derived class definition and if it
            # exists, run it to populate the results
            for name, value in DictUtils.iter(
                    Reflection.getReflectionDict(target)):
                if not inspect.isclass(value):
                    continue
                if NimbleScriptBase in value.__bases__:
                    result = getattr(target, name)()(**kwargs)
                    # BUG FIX: stop at the first matching script class. The
                    # original set an unused 'found' flag and kept looping,
                    # allowing a later match to overwrite the result (the
                    # sibling runPythonExec breaks here as well).
                    break

        # If a result dictionary contains an error key format the response
        # as a failure
        errorMessage = None
        try:
            errorMessage = ArgsUtils.extract(
                NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        except Exception as err:
            pass
        return cls.createReply(
            DataKindEnum.PYTHON_IMPORT, result, errorMessage=errorMessage)
    except Exception as err:
        msg = 'ERROR: Failed to execute remote script'
        NimbleEnvironment.logError([
            msg,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload),
            'TARGET: ' + str(target)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(msg, err),
            response=NimbleResponseData.FAILED_RESPONSE)
def _runImpl(self):
    """Imports the sitemap CSV file at self._path into the database,
    replacing all existing Tracks_SiteMap rows.

    Returns 0 on success and 1 on failure. A session supplied via
    self._session is neither committed, rolled back, nor closed here; a
    locally created session is fully managed by this method."""
    model = Tracks_SiteMap.MASTER
    # Use the externally supplied session when present, otherwise create a
    # local one that this method commits/rolls back and closes itself.
    session = self._session if self._session else model.createSession()

    try:
        self._log.write(u'<h1>Beginning Sitemap Import...</h1>')
        if self._path is None or not os.path.exists(self._path):
            self._log.write(u'<h2>Invalid or missing path</h2>')
            return 1

        # Delete all existing rows
        rowCount = session.query(model).count()
        if rowCount > 0:
            session.query(model).delete()

        with open(self._path, 'rU') as f:
            try:
                reader = csv.reader(
                    f,
                    delimiter=StringUtils.toStr2(','),
                    quotechar=StringUtils.toStr2('"'))
            except Exception as err:
                self._writeError({
                    'message':u'ERROR: Unable to read CSV file "%s"' % self._path,
                    'error':err })
                return

            if reader is None:
                self._writeError({
                    'message':u'ERROR: Failed to create CSV reader for file "%s"' % self._path })
                return

            for row in reader:
                # Skip any rows that don't start with the proper numeric index value, which
                # includes the header row (if it exists) with the column names
                try:
                    index = int(row[0])
                except Exception as err:
                    continue

                rowDict = dict()
                for column in Reflection.getReflectionList(SitemapCsvColumnEnum):
                    value = row[column.index]
                    value = StringUtils.strToUnicode(value)
                    if value != u'' or value is None:
                        rowDict[column.name] = value

                self._fromSpreadsheetEntry(rowDict, session)
    except Exception as err:
        # Only roll back and close a session this method created itself
        if not self._session:
            session.rollback()
            session.close()
        self._log.writeError(u'ERROR: Sitemap Importing Error', err)
        return 1

    if self._session is None:
        session.commit()
        session.close()

    self._log.write(u'<h1>Sitemap Import Complete</h1>')
    return 0
def siteLibraries(self):
    """Returns the reflection list of available site library enum entries."""
    libraries = Reflection.getReflectionList(SiteLibraryEnum)
    return libraries
def runPythonExec(script, kwargs=None):
    """Executes the given python source inside a temporary module, wrapped in
    a single Maya undo chunk, and returns the script's result data.

    :param script: Python source code to execute.
    :param kwargs: Optional dict of inputs exposed to the script.
    :return: The result dict populated by the script, a NimbleResponseData
        failure payload on error, or False when the Maya undo chunk could not
        be opened or closed.
    """
    from nimble.NimbleEnvironment import NimbleEnvironment
    from nimble.data.NimbleResponseData import NimbleResponseData
    from nimble.data.enum.DataKindEnum import DataKindEnum

    try:
        nimble.cmds.undoInfo(openChunk=True)
    except Exception as err:
        return False

    try:
        # Create a new, temporary module in which to run the script
        module = imp.new_module('runExecTempModule')

        # Initialize the script with script inputs
        setattr(
            module,
            NimbleEnvironment.REMOTE_KWARGS_KEY,
            kwargs if kwargs is not None else dict())
        setattr(module, NimbleEnvironment.REMOTE_RESULT_KEY, dict())

        # Executes the script in the new module
        exec_(script, module.__dict__)

        # Find a NimbleScriptBase derived class definition and if it exists,
        # run it to populate the results.
        # FIX: items() instead of the Python-2-only iteritems() so this also
        # runs under Python 3 (identical behavior).
        for name, value in Reflection.getReflectionDict(module).items():
            if not inspect.isclass(value):
                continue
            if NimbleScriptBase in value.__bases__:
                getattr(module, name)().run()
                break

        # Retrieve the results object that contains all results set by the
        # execution of the script
        result = getattr(module, NimbleEnvironment.REMOTE_RESULT_KEY)
    except Exception as err:
        logger = Logger('runPythonExec', printOut=True)
        logger.writeError('ERROR: Failed Remote Script Execution', err)
        result = NimbleResponseData(
            kind=DataKindEnum.PYTHON_SCRIPT,
            response=NimbleResponseData.FAILED_RESPONSE,
            error=str(err))

    # If a result dictionary contains an error key format the response as a
    # failure
    try:
        errorMessage = ArgsUtils.extract(
            NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        if errorMessage:
            return NimbleResponseData(
                kind=DataKindEnum.PYTHON_SCRIPT,
                response=NimbleResponseData.FAILED_RESPONSE,
                error=errorMessage,
                payload=result)
    except Exception as err:
        pass

    try:
        nimble.cmds.undoInfo(closeChunk=True)
    except Exception as err:
        return False

    return result
name += pp.source[start+i] return name.lower() #___________________________________________________________________________________________________ createFromBlock @classmethod def createFromBlock(cls, processor, block, index): try: name = cls.getTagNameFromBlock(processor, block) except Exception, err: return None if MarkupTag._TAG_LIST is None: tags = dict() from StaticFlow.render.tags.TagDefinitions import TagDefinitions for tagDef in Reflection.getReflectionList(TagDefinitions): tags[tagDef.TAG] = tagDef MarkupTag._TAG_LIST = tags classImport = MarkupTag._TAG_LIST.get(name, None) if classImport is None: return None return classImport(processor, block, index, name) #___________________________________________________________________________________________________ getClassAttr def getClassAttr(self, attr, defaultValue =None): # If that attr is already in the class metadata cache return the value if attr in self._classMetadata: return self._classMetadata[attr]
def parseBorder(
        cls, attrs, apply =False, overrides =None, extract =False,
        group =None, defaultColor =None):
    """Parses border-related tag attributes into a CSS border shorthand
    string of the form '<size> <style> <color>'.

    :param attrs: Tag attributes accessor providing the border values.
    :param apply: When True the computed border is added to attrs.styles.
    :param overrides: Optional attribute override source.
    :param extract: When True the consumed border attributes are removed.
    :param group: Style group passed through to attrs.styles.add.
    :param defaultColor: Fallback color (ColorValue or raw value) used when
        no color was parsed from the attributes.
    :return: The border shorthand string, or None when borders are disabled.
    """
    keys = TagAttributesEnum.BORDER
    borderColor = None
    borderSize = None
    borderStyle = None

    # An explicit boolean border attribute wins; when absent, infer enabled
    # state from the presence of border values and parse them.
    useBorder = attrs.getAsBool(keys, None, overrides, True)
    if useBorder is None:
        border = attrs.get(keys, None, overrides)
        if not border:
            useBorder = False
        else:
            useBorder = True
            if not isinstance(border, list):
                border = [border]

            for item in border:
                # First unit-like entry (defaulting to px) is the size
                if borderSize is None:
                    unitItem = UnitAttribute.createIfValid(item, 'px')
                    if unitItem:
                        borderSize = unitItem.valueAndUnit
                        continue

                # First entry matching a line-type alias is the style
                if borderStyle is None:
                    found = False
                    for lineType in Reflection.getReflectionList(LineTypesEnum):
                        if item in lineType[1]:
                            borderStyle = lineType[0]
                            found = True
                            break
                    if found:
                        continue

                # First color-convertible entry is the color
                if borderColor is None:
                    color = attrs.convertToColorValue(item, None)
                    if color:
                        borderColor = color

    if extract:
        attrs.remove(keys)

    if not useBorder:
        return None

    # Resolve the final color, falling back on the attribute back colors or
    # the supplied default when none was parsed
    if borderColor is None:
        if defaultColor is None:
            borderColor = attrs.backColors.borderColor.web
        elif isinstance(defaultColor, ColorValue):
            borderColor = defaultColor.web
        else:
            borderColor = defaultColor
    else:
        borderColor = borderColor.web

    border = u' '.join([
        borderSize if borderSize else '1px',
        borderStyle if borderStyle else 'solid',
        borderColor ])

    if apply:
        attrs.styles.add('border', border, group)

    return border