def _writeError(self, data):
    """ Writes import error data to the logger, formatting it for human
    readable display.

    The data dict may contain: 'data' (the raw row values), 'index' (the row
    index), 'message' (required), 'existing' (a model with a JSON snapshot of
    the conflicting entry), and 'error' (an exception to log).
    """
    source = {}
    # Title-case underscore-separated keys for display, e.g. 'track_id' -> 'Track Id'
    if 'data' in data:
        for n,v in DictUtils.iter(data['data']):
            source[' '.join(n.split('_')).title()] = v

    indexPrefix = ''
    if 'index' in data:
        indexPrefix = ' [INDEX: %s]:' % data.get('index', 'Unknown')

    result = [
        'IMPORT ERROR%s: %s' % (indexPrefix, data['message']),
        'DATA: ' + DictUtils.prettyPrint(source)]

    # When a conflicting existing entry is supplied, decode its snapshot and
    # append it to the output using the same display-key formatting.
    if 'existing' in data:
        source = {}
        snapshot = data['existing'].snapshot
        if snapshot:
            snapshot = JSON.fromString(snapshot)
        if snapshot:
            for n,v in DictUtils.iter(snapshot):
                source[' '.join(n.split('_')).title()] = v
            result.append('CONFLICT: ' + DictUtils.prettyPrint(source))

    if 'error' in data:
        self._logger.writeError(result, data['error'])
    else:
        self._logger.write(result)
def _postAnalyze(self):
    """Finalizes the analysis stage: logs the trackway count, persists the
    weighted/unweighted statistics, saves each quartile CSV, and merges every
    density plot series into a single labeled PDF."""
    self.logger.write('TRACKWAY COUNT: %s' % self._weightedStats.count)
    self._weightedStats.save()
    self._unweightedStats.save()

    for _, quartileCsv in DictUtils.iter(self._quartileStats):
        quartileCsv.save()

    for plotLabel, plotPaths in DictUtils.iter(self._densityPlots):
        self.mergePdfs(plotPaths, '%s-Densities.pdf' % plotLabel.replace(' ', '-'))
def _postAnalyze(self):
    """Builds a histogram of projection-to-stride length ratios across all
    curve segments and merges the resulting plots into a single PDF."""
    ratios = []

    for name, curve in DictUtils.iter(self.data):
        segments = curve.segments
        for i in ListUtils.rangeOn(segments):
            segment = segments[i]
            segmentLine = segment.line

            # If this is an extrapolated segment, use the length from the
            # neighboring segment instead of the artificial length of this
            # segment.
            if segment == segments[0]:
                segmentLine = segments[i + 1].line
            elif segment == segments[-1]:
                segmentLine = segments[i - 1].line

            # Ratio is expressed as a percentage of the segment length.
            for pairData in segment.pairs:
                projectionLine = pairData["line"]
                ratios.append(100.0 * projectionLine.length.raw / segmentLine.length.raw)

    h = Histogram(
        data=ratios,
        binCount=50,
        xLabel="Projection/Stride Ratio (%)",
        title="Relative Stride to Projection Length Ratios",
    )
    h.shaveDataToXLimits()
    self._paths.append(h.save(path=self.getTempFilePath(extension="pdf")))

    self.mergePdfs(self._paths, "Curve-Projection.pdf")
def echo(self):
    """Prints this instance's target, gait parameters, and the string form of
    every channel to stdout."""
    print('TARGET:', self.target)
    print('GAIT PHASE OFFSET:', self._phaseOffset)
    print('DUTY FACTOR:', self._dutyFactor)
    print('CHANNELS:')
    for _, channel in DictUtils.iter(self._channels):
        print(channel.toString())
def getChannel(self, kind):
    """Returns the channel registered under the specified kind, or None when
    no matching channel exists."""
    for channelKind, channel in DictUtils.iter(self._channels):
        if channelKind == kind:
            return channel
    return None
def compileAllOnPath(path, rootPath=None, recursive=False, debug=False, trace=False, force=False, compress=False):
    """Compiles every Coffeescript target found on the given path, optionally
    recursing into subdirectories, and prints a summary of the results and
    any missing imports. Results accumulate on class-level collectors."""
    CoffeescriptBuilder._results = ""
    CoffeescriptBuilder._missing = {}
    if recursive:
        print("RECURSIVE COMPILE AT: " + path)

        def walker(paths, dirName, names):
            # Compile one directory and merge its output into the
            # class-level result/missing collectors (first occurrence of a
            # missing import wins).
            out = CoffeescriptBuilder._compileAllInDirectory(
                os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace, force=force, compress=compress
            )
            CoffeescriptBuilder._results += out["res"]
            for n, v in DictUtils.iter(out["missing"]):
                if n in CoffeescriptBuilder._missing:
                    continue
                CoffeescriptBuilder._missing[n] = v

        FileUtils.walkPath(path, walker, [path, rootPath])
        print("\n\nCOMPILATION RESULTS:" + CoffeescriptBuilder._results)

        if CoffeescriptBuilder._missing:
            print("\n\nMISSING IMPORTS:" + "\n\n")
            for n, v in DictUtils.iter(CoffeescriptBuilder._missing):
                print(v["class"] + " [LINE: #" + str(v["line"]) + " | " + v["package"] + "]")
    else:
        print("COMPILING DIRECTORY: " + path)
        CoffeescriptBuilder._compileAllInDirectory(
            path, rootPath, debug=debug, trace=trace, force=force, compress=compress
        )
def echoModel(self):
    """ An example using a Ziggurat database model. Here a new entry of the
    ZigguratTest_Test model is created and added to the database and its
    index in differing radices is returned in the response.

    NOTE: The model class is imported in-line in this example simply to allow
    use of the Hello Ziggurat examples without model support for those in an
    environment without the required database support.
    """
    try:
        from ziggHello.models.zigguratTest.ZigguratTest_Test import ZigguratTest_Test
        model = ZigguratTest_Test.MASTER

        # Copy only the ALL-CAPS environ entries (conventional WSGI keys)
        # into the entry's info data.
        out = dict()
        for name, value in DictUtils.iter(self._router.ziggurat.environ):
            if name.upper() == name:
                out[name] = StringUtils.toUnicode(value)

        entry = model()
        entry.infoData = out
        model.session.add(entry)
        # Flush so the database assigns the entry's index before reading it.
        model.session.flush()
    except Exception as err:
        # On any failure report the error in the response instead of raising.
        self._router.response['error'] = str(err)
        self._router.logger.writeError(u'MODEL ERROR', err)
        return

    self._router.response['index'] = [entry.i, entry.i16, entry.i36, entry.i64]
def _handleResponseReady(self, request, response):
    """Event handler for the response object being ready for use.

    Applies cache-control, expiration, and validator headers to the outgoing
    response, optionally coerces header values to plain strings for WSGI
    servers that cannot handle unicode (e.g. flup), and cleans up the
    per-thread database sessions.
    """
    if self._cacheControlPublic:
        response.cache_control = "public"

    #-------------------------------------------------------------------------------------------
    # Cache Expiration: Set the caching values according to the _expires property
    rep = self._explicitResponse
    if rep is None or (isinstance(rep, ViewResponse) and rep.allowCaching):
        # Idiom fix: use 'is not None' instead of 'not ... is None'. Falls
        # back to max_age 0 when no expiration was specified.
        response.cache_control.max_age = self.expires if self.expires is not None else 0
    else:
        response.cache_control.max_age = 0

    #-------------------------------------------------------------------------------------------
    # Cache Validators
    if self._etag is not None:
        response.etag = StringUtils.toUnicode(self._etag)
    if self._lastModified is not None:
        response.last_modified = self._lastModified

    # If required encode the response headers as strings to prevent unicode errors. This is
    # necessary for certain WSGI server applications, e.g. flup.
    if self.ziggurat.strEncodeEnviron:
        for n, v in DictUtils.iter(response.headers):
            if StringUtils.isStringType(v):
                response.headers[n] = StringUtils.toStr2(v)

    # Clean up per-thread sessions.
    ConcreteModelsMeta.cleanupSessions()
def getColorNameAndValue(self):
    """ Finds the nearest named color by comparing all named colors in HSL
    space.

    Returns a dict with the display 'name', the color 'value', the raw 'key',
    and a 'residual' percentage expressing how far the match is from this
    color.
    """
    if self._rawColor == 0:
        return { 'name':'Black', 'value':0, 'key':'black', 'residual':0.0 }

    maxRange     = 560.0
    nearestValue = None
    nearestName  = None
    # Fix: this local was previously named 'range', shadowing the builtin.
    bestDistance = 360

    myColor = self.asHsl(output=list)
    poolColor = self.__class__(0)
    for name, value in DictUtils.iter(ColorNames.NAMES):
        poolColor.load(value)
        color = poolColor.asHsl(output=list)
        # Squared euclidean distance in HSL space.
        test = (myColor[0] - color[0])*(myColor[0] - color[0]) \
             + (myColor[1] - color[1])*(myColor[1] - color[1]) \
             + (myColor[2] - color[2])*(myColor[2] - color[2])
        # Fix: accept the first candidate unconditionally so a nearest match
        # always exists. Previously, when every distance exceeded the initial
        # 360 threshold, nearestName stayed None and the return statement
        # crashed on None.replace().
        if nearestName is None or test < bestDistance:
            nearestValue = value
            nearestName  = name
            bestDistance = test
            # Close enough: stop searching.
            if bestDistance < 1:
                break

    return {
        'name':StringUtils.capitalizeWords(nearestName.replace('_', ' ')),
        'value':nearestValue,
        'key':nearestName,
        'residual':100.0*bestDistance/maxRange }
def currentChildWidgetID(self):
    """Returns the key under which the currently displayed child widget is
    registered, or None when no child widget is active."""
    if not self._currentWidget:
        return None
    for widgetID, child in DictUtils.iter(self._widgets):
        if child == self._currentWidget:
            return widgetID
    return None
def save(self, path =None):
    """ Saves the CSV file data to the specified path (defaults to this
    instance's path).

    Returns True on success, False when writing fails. When
    removeIfSavedEmpty is set and there are no rows, the existing file is
    removed instead and nothing is returned.
    """
    if path is None:
        path = self.path

    if self.removeIfSavedEmpty and not self.rows:
        self.remove()
        return

    index = 0
    # NOTE(review): insert() mutates the list returned by fieldNames — assumes
    # fieldNames returns a fresh list each call; confirm.
    names = self.fieldNames
    if self.autoIndexFieldName:
        names.insert(0, self.autoIndexFieldName)

    try:
        # NOTE(review): binary mode plus latin-1 encoding of text values is
        # the Python 2 csv convention; under Python 3 'wb' would fail here.
        with open(path, 'wb') as f:
            writer = csv.DictWriter(f, fieldnames=names, dialect=csv.excel)
            writer.writeheader()
            for row in self.rows:
                result = dict()
                # Auto-index column is populated with a running 1-based count.
                if self.autoIndexFieldName:
                    index += 1
                    result[self.autoIndexFieldName] = index
                # Map each field key to its display name, substituting the
                # field's 'empty' value when the row has no entry.
                for key, spec in DictUtils.iter(self._fields):
                    value = row.get(key, spec.get('empty', ''))
                    name = spec.get('name', key)
                    if StringUtils.isTextType(value):
                        value = value.encode('latin-1')
                    result[name] = value
                writer.writerow(result)
        return True
    except Exception:
        # Best-effort save: any failure is reported via the return value.
        return False
def addChannels(self, channels):
    """Adds one or more channels, accepting either a list of channels or a
    dict whose values are channels. Other types are ignored."""
    if isinstance(channels, list):
        for channel in channels:
            self.addChannel(channel)
    elif isinstance(channels, dict):
        for _, channel in DictUtils.iter(channels):
            self.addChannel(channel)
def __init__(self, **kwargs):
    """Creates a new instance of ConfigReader.

    Keyword args: 'configs' (preloaded config dict), 'filenames' (mapping of
    config keys to .cfg filenames), 'rootConfigPath' (directory containing
    the config files), and 'overrides' (values applied over the parsed
    configs).
    """
    self._configs = ArgsUtils.get('configs', dict(), kwargs)
    self._filenames = ArgsUtils.get('filenames', None, kwargs)
    self._configPath = ArgsUtils.get(
        'rootConfigPath', CadenceEnvironment.getConfigPath(), kwargs
    )

    if self._filenames:
        for n,v in DictUtils.iter(self._filenames):
            if not v:
                continue
            # Resolve the filename against the config path, appending the
            # .cfg extension when missing.
            path = os.path.join(self._configPath, v)
            if not path.endswith('.cfg'):
                path += '.cfg'
            parser = ConfigParser.ConfigParser()
            if os.path.exists(path):
                parser.read(path)
            else:
                raise Exception(path + ' config file does not exist!')
            self._configs[n] = self._configParserToDict(parser)

    self._overrides = dict()
    self.setOverrides(ArgsUtils.get('overrides', None, kwargs))
def setFromDict(self, keysAndValues):
    """Bulk-applies the given key/value mapping to the stored settings and
    persists the result. A falsy mapping is a no-op."""
    if not keysAndValues:
        return
    self._loadSettings()
    for settingKey, settingValue in DictUtils.iter(keysAndValues):
        self._updateSetting(settingKey, settingValue)
    self._saveSettings()
def echoImportFlags(self, separator =' | '):
    """Returns a bracketed, separator-joined listing of the import flag names
    set on this instance, or '--' when none are set."""
    enums = Reflection.getReflectionDict(ImportFlagsEnum)
    names = [key for key, value in DictUtils.iter(enums) if value & self.importFlags]
    if not names:
        return '--'
    return '[%s]' % separator.join(names)
def echoAnalysisFlags(self, separator =' | '):
    """Returns a bracketed, separator-joined listing of the analysis flag
    names set on this instance, or '--' when none are set."""
    out = []
    for key, value in DictUtils.iter(Reflection.getReflectionDict(AnalysisFlagsEnum)):
        if value & self.analysisFlags:
            out.append(key)
    if out:
        return '[%s]' % separator.join(out)
    return '--'
def equivalentProps(self, **kwargs):
    """ Returns True only when every keyword argument matches the attribute
    of the same name on this track instance; False on the first mismatch. """
    for propName, expected in DictUtils.iter(kwargs):
        if getattr(self, propName) != expected:
            return False
    return True
def CLEAN_NAMES(cls):
    """ Lazily builds and caches a lookup of color names normalized to
    lowercase with underscores removed, mapped to their color values. """
    if not cls._CLEAN_NAMES:
        cleaned = dict()
        for rawName, colorValue in DictUtils.iter(cls.NAMES):
            cleaned[rawName.replace('_', '').lower()] = colorValue
        cls._CLEAN_NAMES = cleaned
    return cls._CLEAN_NAMES
def walker(paths, dirName, names):
    # Directory visitor for FileUtils.walkPath: compiles one directory and
    # merges its output into the class-level result collectors.
    # NOTE: debug/trace/force/compress are captured from the enclosing
    # compile call's scope.
    out = CoffeescriptBuilder._compileAllInDirectory(
        os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace, force=force, compress=compress
    )
    CoffeescriptBuilder._results += out["res"]
    # Record each missing import only once; the first occurrence wins.
    for n, v in DictUtils.iter(out["missing"]):
        if n in CoffeescriptBuilder._missing:
            continue
        CoffeescriptBuilder._missing[n] = v
def getByProperties(cls, session, **kwargs):
    """ Loads based on the current values set for the track. This form of
    loading is useful when the uid is not available, e.g. when importing
    data from the spreadsheet. Each keyword becomes an equality filter. """
    query = session.query(cls)
    for columnName, columnValue in DictUtils.iter(kwargs):
        column = getattr(cls, columnName)
        query = query.filter(column == columnValue)
    return query.all()
def _cleanupSettings(self, target =None):
    """ Recursively prunes empty dict sub-settings from the target settings
    dict, defaulting to this instance's settings when no target is given.

    Always returns True.
    """
    if not target:
        target = self._settings
    # Fix: iterate a snapshot of the items. The loop deletes keys from
    # target, and deleting from a mapping while iterating it live either
    # raises or silently skips entries.
    for n, v in list(DictUtils.iter(target)):
        if isinstance(v, dict):
            self._cleanupSettings(target=v)
            # Drop the sub-dict entirely once the recursion emptied it.
            if not v:
                del target[n]
    return True
def _toSerializedDict(cls, src):
    """Returns a copy of src in which Vector3D values are converted to their
    serial-dict form and nested dicts are converted recursively; all other
    values pass through unchanged."""
    out = dict()
    for key, item in DictUtils.iter(src):
        if isinstance(item, Vector3D):
            out[key] = item.toSerialDict()
        elif isinstance(item, dict):
            out[key] = cls._toSerializedDict(item)
        else:
            out[key] = item
    return out
def _handleHttpsResult(self, threadResult):
    """Copies the content and headers from a completed HTTPS thread result
    onto this object, skipping transport-specific headers, then finalizes."""
    result = threadResult['output']
    self.content = result.content
    skipped = ['content-length', 'connection', 'content-encoding']
    for headerName, headerValue in DictUtils.iter(result.headers):
        if headerName in skipped:
            continue
        self.setRawHeader(headerName, headerValue)
    self._finalize()
def _parseElement(name, value, configData):
    # Translates a raw parsed value into a typed ConfigData item. Lists are
    # explicit [type, value] pairs; strings store under 's', numbers under
    # 'n', and dicts become nested ConfigData objects under 'o'.
    if isinstance(value, list):
        configData.setItem(name, value[0], value[1])
    elif isinstance(value, str):
        # NOTE(review): under Python 2 this misses unicode strings (e.g.
        # from json.loads), which then fall through unhandled — confirm
        # whether that is intended.
        configData.setItem(name, 's', value)
    elif isinstance(value, (int, float)):
        configData.setItem(name, 'n', value)
    elif isinstance(value, dict):
        cd = ConfigData()
        for n, v in DictUtils.iter(value):
            JSONConfigParser._parseElement(n, v, cd)
        configData.setItem(name, 'o', cd)
def _parseElement(name, value, configData):
    # Translates a raw parsed value into a typed ConfigData item. Lists are
    # explicit [type, value] pairs; strings store under 's', numbers under
    # 'n', and dicts become nested ConfigData objects under 'o'.
    if isinstance(value, list):
        configData.setItem(name, value[0], value[1])
    elif isinstance(value, str):
        # NOTE(review): under Python 2 this misses unicode strings (e.g.
        # from json.loads), which then fall through unhandled — confirm
        # whether that is intended.
        configData.setItem(name, 's', value)
    elif isinstance(value, (int, float)):
        configData.setItem(name, 'n', value)
    elif isinstance(value, dict):
        cd = ConfigData()
        for n,v in DictUtils.iter(value):
            JSONConfigParser._parseElement(n, v, cd)
        configData.setItem(name, 'o', cd)
def showApplicationLevelWidget(self, widgetID, **kwargs):
    """Activates the specified application-level widget and adds a mute
    request (on its behalf) to every other application-level widget. Unknown
    widget IDs are ignored."""
    shown = self.getApplicationLevelWidget(widgetID)
    if not shown:
        return
    for currentID, current in DictUtils.iter(self._appLevelWidgets):
        if currentID != widgetID:
            current.visibility.addMuteRequest(shown)
            continue
        current.setVisible(True)
        current.activateWidgetDisplay(**kwargs)
    self.refreshGui()
def _fromSerializedDict(cls, src):
    """Inverse of _toSerializedDict: rebuilds Vector3D instances from their
    tagged serialized form and recurses into plain (untagged) nested dicts.
    Dicts tagged with an unrecognized objectType pass through unchanged."""
    out = dict()
    for key, item in DictUtils.iter(src):
        if isinstance(item, dict):
            if 'objectType' not in item:
                item = cls._fromSerializedDict(item)
            elif item['objectType'] == Vector3D.__name__:
                item = Vector3D.fromSerialDict(item)
        out[key] = item
    return out
def walker(paths, dirName, names):
    # Directory visitor for FileUtils.walkPath: compiles one directory and
    # merges its output into the class-level result collectors.
    # NOTE: debug/trace/force/compress are captured from the enclosing
    # compile call's scope.
    out = CoffeescriptBuilder._compileAllInDirectory(
        os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace, force=force, compress=compress)
    CoffeescriptBuilder._results += out['res']
    # Record each missing import only once; the first occurrence wins.
    for n, v in DictUtils.iter(out['missing']):
        if n in CoffeescriptBuilder._missing:
            continue
        CoffeescriptBuilder._missing[n] = v
def echo(self, verbose =False, pretty =False):
    """Returns a printable representation of this message: a compact tag by
    default, the raw message dict when verbose, or a multi-line per-key
    listing when both verbose and pretty are set."""
    msg = self._createMessage()
    header = 'RESPONSE' if hasattr(self, 'response') else 'REQUEST'
    if not verbose:
        return '<NIMBLE %s | %s>' % (header, self.kind)
    if not pretty:
        return header + ': ' + str(msg)
    pieces = ['\n' + 100*'-' + '\n' + header + ':\n' + (len(header) + 1)*'-' + '\n']
    for key, value in DictUtils.iter(msg):
        pieces.append(' ' + str(key).upper() + ': ' + str(value) + '\n')
    return ''.join(pieces)
def echo(self, verbose=False, pretty=False):
    """Formats this message for display. Non-verbose output is a compact tag;
    verbose output dumps the message, with an expanded per-key layout when
    pretty is also set."""
    msg = self._createMessage()
    if hasattr(self, 'response'):
        header = 'RESPONSE'
    else:
        header = 'REQUEST'
    if verbose and pretty:
        s = '\n' + 100 * '-' + '\n' + header + ':\n' + (len(header) + 1) * '-' + '\n'
        for n, v in DictUtils.iter(msg):
            s += ' ' + str(n).upper() + ': ' + str(v) + '\n'
        return s
    if verbose:
        return header + ': ' + str(msg)
    return '<NIMBLE %s | %s>' % (header, self.kind)
def compute(self, plug, dataBlock):
    # Node compute entry point: dispatches the requested plug to its
    # attribute definition's compute handler when one exists and is
    # computable; otherwise defers to the subclass implementation.
    for n, attrDef in DictUtils.iter(self.__nodeAttrDefs__):
        if plug == attrDef.attr:
            if not attrDef.isComputable:
                # Matching attribute is not computable: fall through to
                # _computeImpl below.
                break
            try:
                return attrDef.compute(self, plug, dataBlock)
            except:
                # Intentionally broad: report which node/attribute failed,
                # then re-raise the original error unchanged.
                sys.stderr.write('ERROR: Failed "%s" node computation on %s' % (
                    self.NODE_NAME, attrDef.name))
                raise
    return self._computeImpl(plug, dataBlock)
def hideApplicationLevelWidget(self, widgetID, **kwargs):
    """Deactivates the specified application-level widget, removes its mute
    request from every other application-level widget, and refreshes any
    widget that is visible afterward. Unknown widget IDs are ignored."""
    hidden = self.getApplicationLevelWidget(widgetID)
    if not hidden:
        return
    for currentID, current in DictUtils.iter(self._appLevelWidgets):
        if currentID == widgetID:
            current.setVisible(False)
            current.deactivateWidgetDisplay(**kwargs)
            continue
        current.visibility.removeMuteRequest(hidden)
        if current.visibility.isVisible:
            current.refreshWidgetDisplay()
    self.refreshGui()
def _compileAllInDirectory(path, rootPath=None, debug=False, trace=False, force=False, compress=False):
    """Compiles every executable or library Coffeescript target found
    directly in the given directory and returns a dict with the textual
    results under 'res' and de-duplicated missing-import warnings under
    'missing'."""
    results = ''
    missing = {}
    count = 0
    for f in CoffeescriptBuilder.getScriptsInPath(path):
        # Only compile targets that exist and are executables or libraries.
        target = CoffeescriptDependency(f, rootPath)
        if not (target.exists and (target.isExec or target.isLib)):
            continue

        c = CoffeescriptBuilder(target, rootPath, debug=debug, trace=trace, force=force, compress=compress)
        c.construct()
        count += 1
        for n, v in DictUtils.iter(c.report):
            # Pad the name with dots to a fixed-width column; v encodes the
            # status (0 success, >0 compile failure, <0 assembly failure).
            num = max(0, 60 - len(n))
            results += '\n' + n + ':' + ('.' * num)
            if v == 0:
                results += 'SUCCESS'
            elif v > 0:
                results += 'COMPILATION FAILED'
            else:
                results += 'ASSEMBLY FAILED'
            if len(c.warnings) > 0:
                results += '[' + str(len(c.warnings)) + ' WARNINGS]'
                # Collect missing-import warnings, de-duplicated by
                # package/class/line key.
                for v in c.warnings:
                    if not v['id'] == CoffeescriptBuilder._WARN_ID_MISSING_IMPORT:
                        continue
                    key = v['package'] + '-' + v['class'] + '-' + str(v['line'])
                    if key in missing:
                        continue
                    missing[key] = v

    if len(results) > 0:
        print('\nDIRECTORY ' + path + ' COMPILE RESULTS [' + str(count) + ']:' + results)

    return {'res': results, 'missing': missing}
def _analyzeTrackway(self, trackway, sitemap):
    # Maps measurement keys to their CSV display labels.
    spec = dict(
        pesWidth='Pes Width',
        pesLength='Pes Length',
        manusWidth='Manus Width',
        manusLength='Manus Length',
        strideLength='Stride Length',
        paceLength='Pace Length',
        gauge='Gauge',
        widthNormGauge='Width Normalized Gauge')

    # Seed an empty measurement list per key and cache it on the trackway so
    # the per-track analysis run by the superclass can populate it.
    data = {}
    for key, label in DictUtils.iter(spec):
        data[key] = []
    trackway.cache.set('data', data)

    super(TrackwayStatsStage, self)._analyzeTrackway(trackway, sitemap)

    # Write the collected measurements to the weighted and unweighted stats
    # CSVs, then add a quartile entry for each measurement series.
    self._populateCsvData(self._weightedStats, trackway, data)
    self._populateCsvData(self._unweightedStats, trackway, data, False)
    for key, label in DictUtils.iter(spec):
        self._addQuartileEntry(label, trackway, data[key])
def parse(data, target=None, parseToInterchangeFormat=False):
    """Parses a JSON configuration string into the target dict (a fresh dict
    when none is supplied) and returns it, writing either the interchange or
    the plain dict representation."""
    parsed = json.loads(data)
    if target is None:
        target = {}
    cd = ConfigData()
    for key, value in DictUtils.iter(parsed):
        JSONConfigParser._parseElement(key, value, cd)
    if parseToInterchangeFormat:
        cd.writeToInterchangeDict(target)
    else:
        cd.writeToDict(target)
    return target
def _createElement(data):
    """Serializes a config element: [type, value] pairs get list values
    flattened to a pipe-delimited string, dicts are converted recursively,
    and anything else passes through unchanged."""
    if isinstance(data, list):
        payload = data[1]
        if isinstance(payload, list):
            payload = '|'.join([str(item) for item in payload])
        return [data[0], payload]
    if isinstance(data, dict):
        converted = {}
        for key, value in DictUtils.iter(data):
            converted[key] = JSONConfigParser._createElement(value)
        return converted
    return data
def _writeNode(name, data, depth =1):
    """Serializes a single interchange-format config entry to an XML element
    string at the given indentation depth.

    Lists are [type, value] pairs; dicts become nested <o> object elements;
    strings and numbers become <s> and <n> elements respectively.
    """
    indent = (' '*4*depth)
    target = indent + '<'
    if isinstance(data, list):
        # [type, value] pair: list values are flattened to a pipe-delimited
        # string. BUGFIX: previously serialized str(data) (the entire pair)
        # instead of the value itself.
        d = '|'.join(data[1]) if isinstance(data[1], list) else str(data[1])
        target += data[0] + ' n="' + name + '" v="' + d + '" />\n'
    elif isinstance(data, dict):
        target += 'o n="' + name + '">\n'
        for n,v in DictUtils.iter(data):
            target += XMLConfigParser._writeNode(n, v, depth+1)
        # BUGFIX: terminate the closing tag line with a newline.
        target += indent + '</o>\n'
    elif isinstance(data, str):
        # BUGFIX: a space is required between the tag name and the n
        # attribute (previously emitted invalid '<sn="..."' markup).
        target += 's n="' + name + '" v="' + data + '" />\n'
    elif isinstance(data, (int, float)):
        # BUGFIX: same missing space as the string case ('<nn="..."').
        target += 'n n="' + name + '" v="' + str(data) + '" />\n'
    else:
        # BUGFIX: terminate the unknown-type element with a newline.
        target += 'unknown n="' + name + '" />\n'
    return target
def compileAllOnPath(path, rootPath=None, recursive=False, debug=False, trace=False, force=False, compress=False):
    """Compiles every Coffeescript target found on the given path, optionally
    recursing into subdirectories, and prints a summary of the results and
    any missing imports. Results accumulate on class-level collectors."""
    CoffeescriptBuilder._results = ''
    CoffeescriptBuilder._missing = {}
    if recursive:
        print('RECURSIVE COMPILE AT: ' + path)

        def walker(paths, dirName, names):
            # Compile one directory and merge its output into the
            # class-level result/missing collectors (first occurrence of a
            # missing import wins).
            out = CoffeescriptBuilder._compileAllInDirectory(
                os.path.join(paths[0], dirName), paths[1], debug=debug, trace=trace, force=force, compress=compress)
            CoffeescriptBuilder._results += out['res']
            for n, v in DictUtils.iter(out['missing']):
                if n in CoffeescriptBuilder._missing:
                    continue
                CoffeescriptBuilder._missing[n] = v

        FileUtils.walkPath(path, walker, [path, rootPath])
        print('\n\nCOMPILATION RESULTS:' + CoffeescriptBuilder._results)

        if CoffeescriptBuilder._missing:
            print('\n\nMISSING IMPORTS:' + '\n\n')
            for n, v in DictUtils.iter(CoffeescriptBuilder._missing):
                print(v['class'] + ' [LINE: #' + str(v['line']) + ' | ' + v['package'] + ']')
    else:
        print('COMPILING DIRECTORY: ' + path)
        CoffeescriptBuilder._compileAllInDirectory(path, rootPath, debug=debug, trace=trace, force=force, compress=compress)
def _reformat(cls, src):
    """Returns a copy of src with every key coerced to unicode and every
    value run through _reformatValue."""
    out = dict()
    for key, value in DictUtils.iter(src):
        out[StringUtils.strToUnicode(key)] = cls._reformatValue(value)
    return out
def runPythonImport(cls, payload):
    """Dynamically imports and executes a python target described by the
    payload — a 'module' plus an optional 'class' and/or 'method' and
    'kwargs' — and returns a reply with the result, or a failure
    NimbleResponseData when parsing, importing, or execution fails.
    """
    try:
        kwargs = payload.get('kwargs', {})
        targetModule = StringUtils.toStr2(payload.get('module'))
        targetMethod = StringUtils.toStr2(payload.get('method'))
        targetClass = StringUtils.toStr2(payload.get('class'))
        target = targetClass if targetClass is not None else targetMethod
        if target is None:
            # No explicit class/method: treat the last dotted element of the
            # module path as the target name.
            parts = targetModule.rsplit('.', 1)
            targetModule = parts[0]
            target = parts[1]
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to parse python import payload',
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(
                '\n'.join([
                    'ERROR: Failed to parse python import payload',
                    'PAYLOAD: ' + DictUtils.prettyPrint(payload)
                ]), err),
            response=NimbleResponseData.FAILED_RESPONSE)

    # Dynamically import the specified module and reload it to make sure any changes have
    # been updated
    try:
        module = __import__(
            StringUtils.toStr2(targetModule), globals(), locals(),
            [StringUtils.toStr2(target)] if target else [])
        # NOTE(review): builtin reload() implies Python 2 (or an importlib
        # shim provided elsewhere) — confirm.
        reload(module)
        target = getattr(module, target)
    except Exception as err:
        NimbleEnvironment.logError([
            'ERROR: Failed to import python target',
            'MODULE: %s' % targetModule,
            'TARGET: %s' % target,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError('Failed to import python module', err),
            response=NimbleResponseData.FAILED_RESPONSE)

    try:
        result = dict()
        if targetClass is not None:
            # Instantiate the class and invoke the requested method, or call
            # the instance itself when no method was given.
            tc = target()
            result = getattr(tc, targetMethod)(**kwargs) \
                if targetMethod else \
                tc(**kwargs)
        elif targetMethod is not None:
            result = target(**kwargs)
        else:
            # Find a NimbleScriptBase derived class definition and if it exists, run it to
            # populate the results
            for name, value in DictUtils.iter(
                    Reflection.getReflectionDict(target)):
                if not inspect.isclass(value):
                    continue
                if NimbleScriptBase in value.__bases__:
                    result = getattr(target, name)()(**kwargs)
                    found = True  # NOTE(review): never read — looks vestigial

        # If a result dictionary contains an error key format the response as
        # a failure
        errorMessage = None
        try:
            errorMessage = ArgsUtils.extract(
                NimbleEnvironment.REMOTE_RESULT_ERROR_KEY, None, result)
        except Exception as err:
            # Best-effort extraction: a non-dict result simply has no error.
            pass
        return cls.createReply(DataKindEnum.PYTHON_IMPORT, result, errorMessage=errorMessage)
    except Exception as err:
        msg = 'ERROR: Failed to execute remote script'
        NimbleEnvironment.logError([
            msg,
            'PAYLOAD: ' + DictUtils.prettyPrint(payload),
            'TARGET: ' + str(target)
        ], err)
        return NimbleResponseData(
            kind=DataKindEnum.PYTHON_IMPORT,
            error=cls._getDetailedError(msg, err),
            response=NimbleResponseData.FAILED_RESPONSE)
def serialize(interchangeData):
    """Serializes interchange-format config data to an XML string wrapped in
    a <vm> root element."""
    body = ''
    for key, value in DictUtils.iter(interchangeData):
        body += XMLConfigParser._writeNode(key, value)
    xml = '<vm>\n' + body + '</vm>'
    return xml.decode('unicode_escape')
def puts(self, **kwargs):
    """Stores every keyword argument through put(), one key/value pair at a
    time."""
    for entryKey, entryValue in DictUtils.iter(kwargs):
        self.put(entryKey, entryValue)
def run(self): """Doc...""" #------------------------------------------------------------------------------------------- # GET SELECTED OBJECTS # Get a list of select objects. If no objects are selected then return an error. # Because objects are list based on components, shape nodes are generally returned # instead of transform nodes. In those cases the transform node must be found from # the shape node name objectSelection = cmds.ls(selection=True, objectsOnly=True) if not objectSelection: self.putErrorResult(u'Nothing selected') return targets = dict() for obj in objectSelection: # Check for shape nodes, and get transform node name if a shape is found nodeTypes = cmds.nodeType(obj, inherited=True) if u'shape' in nodeTypes: obj = obj.rsplit(u'|', 1)[0] targets[obj] = [] #------------------------------------------------------------------------------------------- # SORT SELECTED FACES # Use a component selection to get the selected faces and add them to the target # list for their object. for comp in cmds.ls(selection=True, flatten=True): parts = comp.split(u'.') if len(parts) < 2 or parts[0] not in targets: continue targets[parts[0]].append(int(parts[1].lstrip(u'f[').rstrip(u']'))) #------------------------------------------------------------------------------------------- # EXTRACT & SEPARATE # For each object in the targets list extract the selected faces by chipping them off # and then separating the mesh into the separated pieces. results = dict() selects = [] for obj, faces in DictUtils.iter(targets): if not faces: continue faces.sort() comps = [] for f in faces: comps.append(u'%s.f[%s]' % (obj, f)) cmds.polyChipOff(*comps, duplicate=False, keepFacesTogether=True) separateOut = cmds.polySeparate(obj) out = [] for node in separateOut: if MayaNodeUtils.isTransformNode(node): out.append(node) selects.append(node) results[obj] = out cmds.select(*selects, replace=True) self.put('extracts', results)
def serialize(interchangeData):
    """Serializes interchange-format config data to a compact JSON string."""
    converted = {}
    for key, value in DictUtils.iter(interchangeData):
        converted[key] = JSONConfigParser._createElement(value)
    return json.dumps(converted, separators=(',', ':')).decode('unicode_escape')