class ProcessTerminationMonitor(plugins.Observable):
    def __init__(self):
        plugins.Observable.__init__(self)
        self.processesForKill = OrderedDict()
        self.exitHandlers = OrderedDict()

    def listRunningProcesses(self):
        processesToCheck = guiConfig.getCompositeValue("query_kill_processes", "", modeDependent=True)
        if "all" in processesToCheck:
            processesToCheck = [".*"]
        if len(processesToCheck) == 0:
            return []

        running = []
        triggerGroup = plugins.TextTriggerGroup(processesToCheck)
        for process, description in self.processesForKill.values():
            if triggerGroup.stringContainsText(description):
                running.append("PID " + str(process.pid) + " : " + description)
        return running

    def getProcessIdentifier(self, process):
        # Unfortunately the child_watch_add method needs different ways to
        # identify the process on different platforms...
        if os.name == "posix":
            return process.pid
        else:
            return process._handle

    def startProcess(self, cmdArgs, description="", killOnTermination=True,
                     exitHandler=None, exitHandlerArgs=(), **kwargs):
        process = subprocess.Popen(cmdArgs, stdin=open(os.devnull), **kwargs)
        pidOrHandle = self.getProcessIdentifier(process)
        self.exitHandlers[int(pidOrHandle)] = (exitHandler, exitHandlerArgs)
        if killOnTermination:
            self.processesForKill[int(pidOrHandle)] = (process, description)
        gobject.child_watch_add(pidOrHandle, self.processExited)

    def processExited(self, pid, *args):
        if self.processesForKill.has_key(pid):
            del self.processesForKill[pid]
        if self.exitHandlers.has_key(pid):
            exitHandler, exitHandlerArgs = self.exitHandlers.pop(pid)
            if exitHandler:
                exitHandler(*exitHandlerArgs)

    def notifyKillProcesses(self, sig=None):
        # Don't leak processes
        if len(self.processesForKill) == 0:
            return
        diag = logging.getLogger("kill processes")
        self.notify("Status", "Terminating all external viewers ...")
        for pid, (process, description) in self.processesForKill.items():
            if self.exitHandlers.has_key(pid):
                self.exitHandlers.pop(pid)  # don't call exit handlers in this case, we're terminating
            self.notify("ActionProgress")
            diag.info("Killing '" + description + "' interactive process")
            killSubProcessAndChildren(process, sig)
def kmeans_get_cluster_values(data, clusters):
    values = OrderedDict()
    for dataKey, cluster in clusters.items():
        if not values.has_key(cluster):
            values[cluster] = []
        values[cluster].append(data[dataKey])
    return values
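# A minimal usage sketch of the function above. The data/clusters values are
# invented purely for illustration; an OrderedDict is used for `clusters` so
# that iteration order (and hence grouping order) is deterministic.
from collections import OrderedDict

data = {"t1": 0.9, "t2": 0.2, "t3": 0.8}
clusters = OrderedDict([("t1", 1), ("t2", 2), ("t3", 1)])

print kmeans_get_cluster_values(data, clusters)
# OrderedDict([(1, [0.9, 0.8]), (2, [0.2])])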
class EmailResponder(plugins.Responder):
    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate())  # use the command-line name if given, else the date
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def notifyComplete(self, test):
        if test.app.emailEnabled():
            if not self.batchAppData.has_key(test.app):
                self.addApplication(test)
            self.batchAppData[test.app].storeCategory(test)

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test

    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)

    def notifyAllComplete(self):
        mailSender = MailSender(self.runId)
        for appList in self.allApps.values():
            batchDataList = map(self.batchAppData.get, appList)
            mailSender.send(batchDataList)
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for
    the JUnit report writer. Only does anything if the app has
    batch_junit_format:true in its config file"""

    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = getBatchRunName(optionMap)
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
class JUnitResponder(plugins.Responder):
    """Respond to test results and write out results in format suitable for
    the JUnit report writer. Only does anything if the app has
    batch_junit_format:true in its config file"""

    def __init__(self, optionMap, *args):
        plugins.Responder.__init__(self)
        self.runId = optionMap.get("name", calculateBatchDate())  # use the command-line name if given, else the date
        self.allApps = OrderedDict()
        self.appData = OrderedDict()

    def useJUnitFormat(self, app):
        return app.getBatchConfigValue("batch_junit_format") == "true"

    def notifyComplete(self, test):
        if not self.useJUnitFormat(test.app):
            return
        if not self.appData.has_key(test.app):
            self._addApplication(test)
        self.appData[test.app].storeResult(test)

    def notifyAllComplete(self):
        # allApps is {appname : [app]}
        for appList in self.allApps.values():
            # appData is {app : data}
            for app in appList:
                if self.useJUnitFormat(app):
                    data = self.appData[app]
                    ReportWriter(self.runId).writeResults(app, data)

    def _addApplication(self, test):
        app = test.app
        self.appData[app] = JUnitApplicationData()
        self.allApps.setdefault(app.name, []).append(app)
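# The responders above share a bookkeeping idiom: one OrderedDict maps each
# app object to its collected data, while a second groups app objects under
# their name via setdefault. A standalone sketch of just that idiom, with
# plain strings standing in for app objects (all values invented):
from collections import OrderedDict

appData = OrderedDict()
allApps = OrderedDict()
for app_name, app in [("suite", "app-v1"), ("suite", "app-v2"), ("gui", "app-v3")]:
    appData[app] = []  # stands in for JUnitApplicationData()
    allApps.setdefault(app_name, []).append(app)

print allApps
# OrderedDict([('suite', ['app-v1', 'app-v2']), ('gui', ['app-v3'])])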
class TestIteratorMap:
    def __init__(self, dynamic, allApps):
        self.dict = OrderedDict()
        self.dynamic = dynamic
        self.parentApps = {}
        for app in allApps:
            for extra in [app] + app.extras:
                self.parentApps[extra] = app

    def getKey(self, test):
        if self.dynamic:
            return test
        elif test is not None:
            return self.parentApps.get(test.app, test.app), test.getRelPath()

    def store(self, test, iter):
        self.dict[self.getKey(test)] = iter

    def updateIterator(self, test, oldRelPath):
        # relative path of test has changed
        key = self.parentApps.get(test.app, test.app), oldRelPath
        iter = self.dict.get(key)
        if iter is not None:
            self.store(test, iter)
            del self.dict[key]
            return iter
        else:
            return self.getIterator(test)

    def getIterator(self, test):
        return self.dict.get(self.getKey(test))

    def remove(self, test):
        key = self.getKey(test)
        if self.dict.has_key(key):
            del self.dict[key]
def getFreeTextData(self):
    data = OrderedDict()
    for test in self.getAllTests():
        freeText = test.state.freeText
        if freeText:
            if not data.has_key(freeText):
                data[freeText] = []
            data[freeText].append(test)
    return data.items()
def getFreeTextData(self, tests):
    data = OrderedDict()
    for testName, state, extraVersion in tests:
        freeText = state.freeText if hasattr(state, "freeText") else None
        if freeText:
            if not data.has_key(freeText):
                data[freeText] = []
            data[freeText].append((testName, state, extraVersion))
    return data.items()
def getFreeTextData(self, tests):
    data = OrderedDict()
    for testName, state, extraVersion in tests:
        freeText = state.freeText
        if freeText:
            if not data.has_key(freeText):
                data[freeText] = []
            data[freeText].append((testName, state, extraVersion))
    return data.items()
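# The three getFreeTextData variants above share one idiom: group items under
# a key while preserving first-seen key order. A self-contained sketch of the
# same pattern using setdefault instead of the explicit has_key check
# (equivalent behaviour; sample data invented):
from collections import OrderedDict

def group_by(pairs):
    data = OrderedDict()
    for key, item in pairs:
        # setdefault replaces the has_key / initialise-empty-list dance
        data.setdefault(key, []).append(item)
    return data.items()

print group_by([("failed", "t1"), ("passed", "t2"), ("failed", "t3")])
# [('failed', ['t1', 't3']), ('passed', ['t2'])]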
def decay_descriptors(self):
    '''This won't work in all situations, eg if you have the same head
    particle as input to the next combined particle (something like
    Upsilon -> D0 D0bar), but it works with most.'''
    descs = OrderedDict()
    for comb in filter(lambda x: isinstance(x, CombineParticles), self.expandedMembers):
        if hasattr(comb, 'DecayDescriptor') and comb.DecayDescriptor:
            head, strippeddesc = head_descriptor(comb.DecayDescriptor)
            if descs.has_key(head):
                descs[head].append(comb.DecayDescriptor)
            else:
                descs[head] = [comb.DecayDescriptor]
        else:
            for desc in comb.DecayDescriptors:
                head, strippeddesc = head_descriptor(desc)
                if descs.has_key(head):
                    descs[head].append(desc)
                else:
                    descs[head] = [desc]
    return descs
class GenerateTestSummary(guiplugins.ActionDialogGUI):
    def __init__(self, *args):
        guiplugins.ActionDialogGUI.__init__(self, *args)
        self.addOption("generate", "", possibleDirs=[os.getenv("TEXTTEST_TMP", "")], saveFile=True)
        self.batchAppData = OrderedDict()
        self.allApps = OrderedDict()

    def performOnCurrent(self):
        fileName = self.getFileName()
        for test in self.currTestSelection:
            if test.state.isComplete():
                if not self.batchAppData.has_key(test.app):
                    self.addApplication(test)
                self.batchAppData[test.app].storeCategory(test)
        self.writeTextSummary(fileName)

    def writeTextSummary(self, fileName):
        mailSender = MailSender()
        with open(fileName, "w") as f:
            for appList in self.allApps.values():
                batchDataList = map(self.batchAppData.get, appList)
                f.write(mailSender.makeContents(batchDataList, False))

    def getFileName(self):
        fileName = self.optionGroup.getOptionValue("generate")
        if not fileName:
            raise plugins.TextTestError, "Cannot save selection - no file name specified"
        elif os.path.isdir(fileName):
            raise plugins.TextTestError, "Cannot save selection - existing directory specified"
        else:
            return fileName

    def _getTitle(self):
        return "Generate test summary"

    def getRootSuite(self, test):
        if test.parent:
            return self.getRootSuite(test.parent)
        else:
            return test

    def addApplication(self, test):
        rootSuite = self.getRootSuite(test)
        app = test.app
        self.batchAppData[app] = BatchApplicationData(rootSuite)
        self.allApps.setdefault(app.name, []).append(app)
def get_decay_descriptors(self):
    seq = filter(lambda alg: 'CombineParticles' in alg, self.get_filter_sequence())
    alldescs = OrderedDict()
    for alg in seq:
        props = self.get_properties(alg)
        if props['DecayDescriptor']:
            descs = [props['DecayDescriptor']]
        else:
            descs = eval(props['DecayDescriptors'])
        for desc in descs:
            head, strippeddesc = head_descriptor(desc)
            if alldescs.has_key(head):
                alldescs[head].append(strippeddesc)
            else:
                alldescs[head] = [desc]
    return alldescs
def suvan():
    s = Session()
    try:
        pictures = s.query(Picture).order_by(desc(Picture.date_uploaded)).all()
        d = OrderedDict()
        for p in pictures:
            dstr = p.date_uploaded.strftime('%B %d, %Y')
            if not d.has_key(dstr):
                d[dstr] = {}
                d[dstr]['date'] = dstr
                d[dstr]['age'] = getAge(p.date_uploaded)
                d[dstr]['pictures'] = []
            d[dstr]['pictures'].append(p)
        return render_template('suvan.html', pictures=d.values())
    finally:
        s.close()
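# The same date-bucketing idiom in isolation, stdlib only (timestamps
# invented): because the input is sorted newest-first and OrderedDict keeps
# insertion order, the template receives one bucket per day, newest first.
from collections import OrderedDict
from datetime import datetime

uploads = [datetime(2012, 5, 2, 9), datetime(2012, 5, 2, 17), datetime(2012, 4, 30)]
uploads.sort(reverse=True)  # newest first, mirroring the order_by(desc(...)) query

d = OrderedDict()
for ts in uploads:
    dstr = ts.strftime('%B %d, %Y')
    if not d.has_key(dstr):
        d[dstr] = {'date': dstr, 'pictures': []}
    d[dstr]['pictures'].append(ts)

print [(k, len(v['pictures'])) for k, v in d.items()]
# [('May 02, 2012', 2), ('April 30, 2012', 1)]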
def get_decay_descriptors(self):
    seq = self.get_filter_sequence()
    alldescs = OrderedDict()
    for alg in seq:
        props = self.get_properties(alg)
        if not 'DecayDescriptor' in props:
            continue
        if props['DecayDescriptor']:
            descs = [props['DecayDescriptor']]
        elif 'DecayDescriptors' in props:
            descs = eval(props['DecayDescriptors'])
        else:
            continue
        for desc in descs:
            head, strippeddesc = head_descriptor(desc)
            if alldescs.has_key(head):
                alldescs[head].append(strippeddesc)
            else:
                alldescs[head] = [desc]
    return alldescs
class Dependency:
    ## Construct an empty dependency tree
    # @param self the object reference
    # @param silent minimal feedback
    # @param autobuild warn rather than fail on multiple version dependencies. XXX
    def __init__(self, silent=True, autobuild=False):
        ## The ASKAP top-level directory
        self.ASKAPROOT = os.environ.get("ASKAP_ROOT")
        if self.ASKAPROOT is None:
            msg = "ASKAP_ROOT environment variable is not defined"
            raise BuildError(msg)
        #
        self.DEPFILE = "dependencies"  # The basename of the dependency file
        self.INSTALL_SUBDIR = "install"
        self._deps = OrderedDict()
        #
        self._bindirs = []
        self._incdirs = []
        self._libdirs = []
        self._rootdirs = []
        #
        self._cppflags = []  # XXX "defs" in package.info. LOFAR/log4cxx
        #
        self._env = []
        self._jars = []
        self._libs = []
        self._packages = []
        #
        self._ldlibpath = ""
        self._pypath = ""
        #
        self._autobuild = autobuild
        self._silent = silent  # minimal output
        self.selfupdate = False  # should object request updates from svn

    def q_print(self, msg):
        if self._silent:
            return
        utils.q_print(msg)

    ## Get the path of the specified dependency package
    # @param self the current object
    # @param key the label of the package dependency
    # @return the path (relative to ASKAP_ROOT) to the package
    def get_dep_path(self, key):
        return self._deps[key]["path"]

    # Used by "in" test.
    # object.__contains__(self, item)
    #
    # Called to implement membership test operators. Should return true if item
    # is in self, false otherwise. For mapping objects, this should consider
    # the keys of the mapping rather than the values or the key-item pairs.
    #
    # For objects that do not define __contains__(), the membership test first
    # tries iteration via __iter__(), then the old sequence iteration protocol
    # via __getitem__(), see this section in the language reference.
    #
    # http://docs.python.org/reference/datamodel.html
    def __contains__(self, key):
        return self._deps.has_key(key)

    ## Get the absolute path to the dependency packages installed location
    # @param self the current object
    # @param key the label of the package dependency
    # @return the absolute path to the package installed location
    def get_install_path(self, key):
        rel_path = self._deps[key]["path"]
        full_path = os.path.join(self.ASKAPROOT, rel_path, self.INSTALL_SUBDIR)
        return os.path.abspath(full_path)

    def get_path(self):
        return os.path.pathsep.join(self._bindirs)

    ## Get the libraries retrieved in the dependency analysis
    # @param self the object reference
    # @return a list of library names
    def get_libs(self, mapped=False):
        if mapped:
            return self._libs[:]
        else:
            return [m[0] for m in self._libs]

    ## Get the environment variables retrieved in the dependency analysis
    # @param self the object reference
    # @return a dictionary of ENVVAR => value pairs
    def get_env(self):
        return dict([i.split("=") for i in self._env])

    ## Get the java classpath for the dependencies
    # @param self the object reference
    # @return a classpath string of the form x/y/z.jar:a/b/c.jar
    def get_classpath(self):
        return os.path.pathsep.join(self._jars)

    ## Get the root directories of the tags retrieved in the dependency analysis
    # @param self the object reference
    # @return a list of directory names
    def get_rootdirs(self, mapped=False):  # XXX used in ant.py builder with mapped=true.
        if mapped:
            return [(k, os.path.join(self.ASKAPROOT, v['path']))
                    for k, v in self._deps.iteritems()]
        return self._rootdirs[:]

    ## Get the LIBRARY directories retrieved in the dependency analysis
    # @param self the object reference
    # @param mapped return directory tuples (rootdir, libdir)
    # @return a list of library directories or tuples of rootdirs and libdirs
    def get_librarydirs(self, mapped=False):
        if mapped:
            return self._libdirs[:]
        else:
            return [m[0] for m in self._libdirs]

    ## Get the LD_LIBRARY_PATH accumulated in the dependency analysis
    # @param self the object reference
    # @return a string representing the LD_LIBRARY_PATH
    def get_ld_library_path(self):
        return self._ldlibpath.strip(":")

    ## Get the INCLUDE directories retrieved in the dependency analysis
    # @param self the object reference
    # @return a list of header file directories
    def get_includedirs(self):
        return self._incdirs[:]

    ## Get the CPPFLAGS retrieved in the dependency analysis
    # @param self the object reference
    # @return a list of preprocessor flags
    def get_cppflags(self):
        return self._cppflags[:]

    def get_pythonpath(self):
        return self._pypath.strip(":")

    ## Get a list of doxygen tag files in the dependencies. This is used for
    # cross-referencing the documentation
    # @todo Re-enable: This has been disabled until it is working for python
    # @param self the object reference
    # @return a list of TAGFILES entries
    # XXX used only in scons_tools/askap_package.py
    def get_tagfiles(self):
        tagfiles = []
        for pth in self._rootdirs:
            tagname = utils.tag_name(pth)
            tagpath = os.path.join(pth, tagname)
            if os.path.exists(tagpath):
                tagfiles.append('"%s=%s/html"' % (tagpath, pth))
        return tagfiles

    def _get_dependencies(self, package):
        codename = utils.get_platform()['codename']
        hostname = socket.gethostname().split(".")[0]
        for ext in ['default', codename, hostname]:
            if ext:
                depfile = '%s.%s' % (self.DEPFILE, ext)
                if package:
                    depfile = os.path.join(self.ASKAPROOT, package, depfile)
                if self.selfupdate:
                    # always update if it is the "root/target" package
                    basedir = os.path.split(depfile)[0] or "."
                    if not os.path.exists(basedir):
                        utils.update_tree(basedir)
                self._get_depfile(depfile)

    def _get_depfile(self, depfile, overwrite=False):
        if not os.path.exists(depfile):
            # assume no dependencies
            return
        dfh = file(depfile)
        for line in dfh.readlines():
            line = line.strip()
            if line.startswith("#"):
                continue
            kv = line.split("=", 1)
            if len(kv) == 2:
                key = kv[0].strip()
                value = kv[1].strip()
                # see if the file explicitly names any libs
                lspl = value.split(";")
                libs = None
                if len(lspl) > 1:
                    libs = lspl[1].strip().split()
                value = lspl[0]
                self._add_dependency(key, value, libs, overwrite)
                if not value.startswith("/"):
                    # recurse into ASKAP dependencies
                    # otherwise just move on as we specified system dependency
                    # which will not have a dependency file
                    self._packages.append(value)
                    self._get_dependencies(value)
        dfh.close()

    def _get_info(self, packagedir):
        info = {
            # A single directory path relative to the install directory.
            'bindir': 'bin',
            'distdir': 'dist',
            'incdir': 'include',
            'libdir': 'lib',
            # Space separated lists. XXX Default should be '[]'?
            'defs': None,
            'env': None,
            'jars': None,
            'libs': None,
            # Define a single python module name and version.
            # e.g. pymodule=numpy==1.2.0
            'pymodule': None,
        }
        sslists = ['defs', 'env', 'jars', 'libs']
        infofile = os.path.join(packagedir, 'package.info')
        if os.path.exists(infofile):
            f = file(infofile)
            for line in f.readlines():
                line = line.strip()
                if line.startswith("#"):
                    continue
                kv = line.split("=", 1)
                if len(kv) == 2:
                    key = kv[0].strip()
                    value = kv[1].strip()
                    if key in info.keys():
                        if key in sslists:
                            info[key] = value.split()
                        else:
                            info[key] = value
            f.close()
        return info

    def _add_dependency(self, key, value, libs, overwrite=False):
        if self._deps.has_key(key):
            # deal with potential symbolic links for 'default' packages
            paths = [self._deps[key]["path"], value]
            outpaths = []
            for pth in paths:
                if not pth.startswith("/"):
                    pth = os.path.join(os.environ["ASKAP_ROOT"], pth)
                pth = os.path.realpath(pth)
                outpaths.append(pth)
            if outpaths[0] == outpaths[1]:
                if libs:
                    if self._deps[key]["libs"] is not None:
                        # prepend the libs
                        self._deps[key]["libs"] = libs + self._deps[key]["libs"]
                    else:
                        self._deps[key]["libs"] = libs
                    self._deps.toend(key)
                else:
                    # another dependency, so move it to the end, so link
                    # order is correct
                    self._deps.toend(key)
                return
            else:
                if overwrite:
                    self._deps[key]["path"] = value
                    self.q_print("info: Overwriting default package dependency '%s' with host specific package (from %s)" % (key, value))
                elif self._autobuild:  # XXX maybe a mistake?
                    self.q_print("warn: Possible multiple version dependency \n\
        %s != %s" % (self._deps[key]["path"], value))
                else:
                    raise BuildError("Multiple version dependency \n\
        %s != %s" % (self._deps[key]["path"], value))
        else:
            self.q_print("info: Adding package dependency '%s' (from %s)" % (key, value))
            # now update the dependency itself
            # XXX only used in Tools/scons_tools/askap_package.py
            if self.selfupdate:
                utils.update_tree(value)
            self._deps[key] = {"path": value, "libs": libs}

    def _remove_duplicates(self, values):
        # find unique elements
        libs = [v[0] for v in values]
        for k in set(libs):
            # remove all but last duplicate entry
            while libs.count(k) > 1:
                idx = libs.index(k)
                libs.pop(idx)
                values.pop(idx)

    ## Add a ThirdPartyLibrary or ASKAP package to the environment
    # This will add the package path in ASKAP_ROOT
    # @param self the object reference
    # @param pkgname The name of the package as in the repository, e.g.
    # lapack. Default None means that this is defined in local
    # dependencies.xyz
    # @param tag The location of the package, e.g.
    # 3rdParty/lapack-3.1.1/lapack-3.1.1
    # @param libs The name of the libraries to link against,
    # default None is the same as the pkgname
    # @param libdir The location of the library dir relative to the package,
    # default None which will use settings in the package.info file
    # @param incdir The location of the include dir relative to the package,
    # default None which will use settings in the package.info file
    # @param pymodule the 'require' statement to specify this dependency
    # statement, e.g. "askap.loghandlers==current"
    def add_package(self, pkgname=None, tag=None, libs=None, libdir=None,
                    incdir=None, bindir=None, pymodule=None):
        self._deps = OrderedDict()
        if pkgname:
            if not tag:
                raise BuildError("No tag specified")
            if self.selfupdate:
                # if not os.path.exists(tag):
                utils.update_tree(tag)
            self._add_path(pkgname, self.ASKAPROOT, tag, libs, libdir,
                           incdir, bindir, pymodule)
            self.q_print("info: Adding package '%s'" % pkgname)

        if tag:
            tag = os.path.join(self.ASKAPROOT, tag)
        self._get_dependencies(tag)

        parent = ''
        for key, value in self._deps.iteritems():
            self._add_path(key, self.ASKAPROOT, value["path"],
                           libs=value["libs"], parent=parent)
            parent = value["path"]

    # Add a ASKAP repository path to the environment
    # This sets up LIBPATH and CPPPATH
    def _add_path(self, pkgname, root, tag, parent='', libs=None,
                  libdir=None, incdir=None, bindir=None, pymodule=None):
        loc = None
        if tag.startswith("/"):  # external package
            loc = tag
        else:  # ASKAP package or 3rdParty library
            loc = os.path.join(root, tag)
        rloc = os.path.relpath(loc, self.ASKAPROOT)
        if not os.path.exists(loc):
            raise BuildError("Dependency directory '%s' does not exist (requested by %s)." % (rloc, parent))

        self._rootdirs += [loc]
        info = self._get_info(loc)  # get optional package info
        idir = os.path.join(loc, self.INSTALL_SUBDIR)  # actual installation.

        if not bindir:  # add bin directory
            bindir = info["bindir"]
        if bindir:  # None means disabled in info file
            pth = os.path.join(idir, bindir)
            if os.path.exists(pth):
                self._bindirs += [pth]

        if not incdir:  # add include directory
            incdir = info["incdir"]
        if incdir:  # None means disabled in info file
            pth = os.path.join(idir, incdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: incdir '%s' does not exist." % pth)
            else:
                self._incdirs += [pth]

        if not libdir:  # add library directory
            libdir = info["libdir"]
        if libdir:  # None means disabled in info file
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            else:
                self._ldlibpath += os.path.pathsep + pth
                self._libdirs += [(pth, idir)]

        libs = libs or info["libs"]
        addlibs = True
        if isinstance(libs, list) and len(libs) == 0:
            addlibs = False
        libs = libs or pkgname
        if not isinstance(libs, list):
            libs = [libs]
        if addlibs:  # only add lib if it's not a python module
            nlibs = []
            for lib in libs:
                instdir = idir
                if not glob.glob("{0}/lib{1}*".format(os.path.join(idir, libdir), lib)):
                    instdir = ""
                nlibs.append((lib, instdir))
            self._libs += nlibs
        libs = self._libs[:]  # copy
        self._remove_duplicates(libs)
        self._libs = libs

        if info["defs"]:  # add package defines
            self._cppflags += info["defs"]

        if info["env"]:  # add environment variables
            self._env += info["env"]

        # check whether it is python, i.e. pymodule entry in package.info
        if not pymodule:
            pymodule = info["pymodule"]
        if pymodule:
            pth = os.path.join(idir, libdir, utils.get_site_dir())
            if self._pypath.find(pth) < 1:
                self._pypath = os.path.pathsep.join([pth, self._pypath])

        if info["jars"]:
            pth = os.path.join(idir, libdir)
            if not os.path.exists(pth):
                if not pymodule:
                    self.q_print("warn: libdir '%s' does not exist." % pth)
            for jar in info["jars"]:
                jar = os.path.join(pth, jar)
                if jar not in self._jars:
                    self._jars.append(jar)
class POParser:
    """Parses an existing po- file and builds a dictionary according to
    MessageCatalog. POParser is the deserializer, POWriter the serializer.
    """

    def __init__(self, file):
        self._file = file
        self._in_paren = re.compile(r'"(.*)"')
        self.msgdict = OrderedDict()  # see MessageCatalog for structure
        self.line = ''
        self.sameMessageEntry = True
        self.msgid = ''
        self.msgstr = ''
        self.references = []
        self.automatic_comments = []
        self.comments = []

    def read(self):
        """Start reading from file. After the call to read() has finished, you
        may access the structure that I read in through the ``msgdict``
        attribute."""
        for no, line in enumerate(self._file):
            self.line = line
            oldstatus = self.sameMessageEntry
            if oldstatus:
                self._readSameMessage()
            else:
                self._readNewMessage()
            newstatus = self.sameMessageEntry
            if oldstatus != newstatus:
                # function changed state: call new function with same line
                if newstatus:
                    self._readSameMessage()
                else:
                    self._readNewMessage()
        # last msg
        if not self.msgdict.has_key(self.msgid):
            self.line = '#:'
            self._readNewMessage()

    def _readSameMessage(self):
        """We're reading a comment or msgid."""
        line = self.line
        if line.startswith('msgstr'):
            self.sameMessageEntry = False
        elif line.startswith('#:'):
            self.references.append(line[2:].strip())
        elif line.startswith('#.'):
            line = line[2:].strip()
            ls = line.startswith
            if ls(ORIGINAL_COMMENT):
                line = line.replace(ORIGINAL_COMMENT, DEFAULT_COMMENT)
            if not line in self.automatic_comments:
                self.automatic_comments.append(line)
        elif line.startswith('#'):
            line = line[1:].strip()
            ls = line.startswith
            if ls(ORIGINAL_COMMENT) or ls(DEFAULT_COMMENT):
                line = line.replace(ORIGINAL_COMMENT, DEFAULT_COMMENT)
                if not line in self.automatic_comments:
                    self.automatic_comments.append(line)
            else:
                self.comments.append(line)
        else:
            search = self._in_paren.search(line)
            if search:
                self.msgid += search.groups()[0]

    def _readNewMessage(self):
        """We're reading msgstr."""
        sw = self.line.startswith
        if sw('#') or sw('msgid'):
            self.sameMessageEntry = True
            self.msgdict[self.msgid] = MessageEntry(self.msgid,
                                                    msgstr=self.msgstr,
                                                    references=self.references,
                                                    automatic_comments=self.automatic_comments,
                                                    comments=self.comments)
            # reset variables
            self.msgid = self.msgstr = ''
            self.references = []
            self.automatic_comments = []
            self.comments = []
        else:
            search = self._in_paren.search(self.line)
            if search:
                self.msgstr += search.groups()[0]
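# The parser's quoted-string handling rests on a single regex: "(.*)" grabs
# everything between the outermost double quotes on a line, and multi-line
# msgids/msgstrs are accumulated with +=. A standalone illustration of just
# that mechanism (the sample po lines are invented):
import re

_in_paren = re.compile(r'"(.*)"')

msgid = ''
for line in ['msgid ""', '"Hello "', '"world"']:
    search = _in_paren.search(line)
    if search:
        msgid += search.groups()[0]

print repr(msgid)  # 'Hello world'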
def SecondButtonPress(url, HostPage, page=None, elm="", elm2="", wform=0,
                      addkey=None, removekey=None, cookies={}, wait=0,
                      captchakey=None, captchaimg=None, captchacookies={},
                      split=None, GetUserAgent=None):
    domain = HostPage.split('/')[2]
    payload = OrderedDict()
    headers = OrderedDict()
    headers['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
    headers['Accept-Charset'] = 'ISO-8859-1,utf-8;q=0.7,*;q=0.3'
    headers['Accept-Encoding'] = 'gzip,deflate,sdch'
    headers['Accept-Language'] = 'en-US,en;q=0.8'
    headers['Cache-Control'] = 'max-age=0'
    headers['Connection'] = 'keep-alive'
    headers['Referer'] = url
    if GetUserAgent == None:
        headers['User-Agent'] = UserAgent
    else:
        headers['User-Agent'] = GetUserAgent
    session = requests.session()
    requests.utils.add_dict_to_cookiejar(session.cookies, cookies)
    if page != None:
        s = page
    else:
        s = session.get(HostPage, headers=headers)
    try:
        form = HTML.ElementFromString(s.content)
    except:
        form = HTML.ElementFromString(s)
    try:
        whichform = form.xpath('//' + elm + 'form')[wform]
        if len(whichform.xpath('./' + elm2 + 'input')) != 0:
            for input in whichform.xpath('./' + elm2 + 'input'):
                if input.get('name') != None:
                    key = input.get('name')
                    value = input.get('value')
                    if key != 'method_premium':
                        if not payload.has_key(key):
                            payload[key] = [value]
                        else:
                            payload[key].append(value)
        else:
            for input in form.xpath('//input'):
                if input.get('name') != None:
                    key = input.get('name')
                    value = input.get('value')
                    if key != 'method_premium':
                        if not payload.has_key(key):
                            payload[key] = [value]
                        else:
                            payload[key].append(value)
        if captchakey != None:
            try:
                payload[captchakey] = GetImgValue(url=captchaimg, HostPage=HostPage, UserAgent=UserAgent,
                                                  cookies=captchacookies, split=split)
            except:
                payload[captchakey] = "Processing Issue"
        if addkey != None:
            payload.update(addkey)
        if removekey != None:
            for key in removekey:
                try:
                    del payload[key]
                except KeyError:
                    pass
        Log(payload)
        if wait != 0:  # wait required
            time.sleep(wait)
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        headers['Origin'] = 'http://' + domain
        headers['Referer'] = HostPage
        formaction = form.xpath('//' + elm + 'form')[wform].get('action')
        if formaction != None and formaction != "":
            if formaction.split('/')[0] == 'http:':
                HostPage = formaction
            elif len(formaction.split('/')) == 2:
                HostPage = 'http://' + HostPage.split('/')[2] + formaction
            elif len(formaction.split('/')) == 1:
                HostPage = HostPage.rpartition('/')[0] + '/' + formaction
        r = session.post(HostPage, data=payload, headers=headers, allow_redirects=True)
        r.raise_for_status()
        r.cookies = session.cookies
        return r
    except:
        s.cookies = session.cookies
        return s
class XRCCodeWriter(BaseLangCodeWriter, wcodegen.XRCMixin):
    """\
    Code writer class for writing XRC XML code out of the designed GUI
    elements.
    """

    xrc_objects = None
    """\
    dictionary of active L{XrcObject} instances: during the code generation
    it stores all the non-sizer objects that have children (i.e. frames,
    dialogs, panels, notebooks, etc.), while at the end of the code
    generation, before L{finalize} is called, it contains only the true
    toplevel objects (frames and dialogs), and is used to write their XML
    code (see L{finalize}). The other objects are deleted when L{add_object}
    is called with their corresponding code_object as argument (see
    L{add_object})
    """

    global_property_writers = {
        'font': FontPropertyHandler,
        'events': EventsPropertyHandler,
        'extraproperties': ExtraPropertiesPropertyHandler,
    }
    """\
    Dictionary whose items are custom handlers for widget properties
    """

    property_writers = {}
    """\
    Dictionary of dictionaries of property handlers specific for a widget;
    the keys are the class names of the widgets.

    Example: property_writers['wxRadioBox'] = {'choices': choices_handler}
    """

    obj_builders = {}
    """\
    Dictionary of ``writers'' for the various objects
    """

    tmpl_encoding = '<?xml version="1.0" encoding="%s"?>\n'
    tmpl_generated_by = '<!-- %(generated_by)s -->'

    use_names_for_binding_events = False

    # inject different XRC objects
    XrcObject = XrcObject
    SizerItemXrcObject = SizerItemXrcObject
    SpacerXrcObject = SpacerXrcObject
    DefaultXrcObject = DefaultXrcObject
    NotImplementedXrcObject = NotImplementedXrcObject

    def __init__(self):
        BaseLangCodeWriter.__init__(self)
        # Inject into all classes derived from XrcObject
        if not hasattr(XrcObject, 'tabs'):
            XrcObject.tabs = self.tabs
        if not hasattr(XrcObject, '_format_comment'):
            XrcObject._format_comment = self._format_comment

    def init_lang(self, app_attrs):
        # for now we handle only single-file code generation
        if self.multiple_files:
            raise errors.WxgXRCMultipleFilesNotSupported()

        # overwrite existing sources always
        self._overwrite = True

        self.output_file_name = app_attrs['path']
        self.out_file = StringIO.StringIO()
        self.out_file.write('\n<resource version="2.3.0.1">\n')
        self.curr_tab = 1
        self.xrc_objects = OrderedDict()

    def finalize(self):
        # write the code for every toplevel object
        for obj in self.xrc_objects.itervalues():
            obj.write(self.out_file, 1)
        self.out_file.write('</resource>\n')
        # store the contents to file
        self.save_file(self.output_file_name, self.out_file.getvalue())

    def add_app(self, app_attrs, top_win_class):
        """\
        In the case of XRC output, there's no wxApp code to generate
        """
        pass

    def add_object(self, unused, sub_obj):
        """\
        Adds the object sub_obj to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not top_obj, but sub_obj's true parent
        top_obj = sub_obj.parent
        builder = self.obj_builders.get(sub_obj.base, DefaultXrcObject)
        try:
            # check whether we already created the xrc_obj
            xrc_obj = sub_obj.xrc
        except AttributeError:
            # builder functions must return a subclass of XrcObject
            xrc_obj = builder(sub_obj)
            sub_obj.xrc = xrc_obj
        else:
            # if we found it, remove it from the self.xrc_objects dictionary
            # (if it was there, i.e. the object is not a sizer), because this
            # isn't a true toplevel object
            if sub_obj in self.xrc_objects:
                del self.xrc_objects[sub_obj]
        # let's see if sub_obj's parent already has an XrcObject: if so, it
        # is temporarily stored in the self.xrc_objects dict...
        if top_obj in self.xrc_objects:
            top_xrc = self.xrc_objects[top_obj]
        else:
            # ...otherwise, create it and store it in the self.xrc_objects
            # dict
            top_xrc = self.obj_builders.get(top_obj.base, DefaultXrcObject)(top_obj)
            top_obj.xrc = top_xrc
            self.xrc_objects[top_obj] = top_xrc
        top_obj.xrc.children.append(xrc_obj)

    def add_sizeritem(self, unused, sizer, obj, option, flag, border):
        """\
        Adds a sizeritem to the XRC tree. The first argument is unused.
        """
        # what we need in XRC is not toplevel, but sub_obj's true parent
        toplevel = obj.parent
        top_xrc = toplevel.xrc
        obj_xrc = obj.xrc
        try:
            sizer_xrc = sizer.xrc
        except AttributeError:
            # if the sizer has not an XrcObject yet, create it now
            sizer_xrc = self.obj_builders.get(sizer.base, DefaultXrcObject)(sizer)
            sizer.xrc = sizer_xrc
        # we now have to move the children from 'toplevel' to 'sizer'
        index = top_xrc.children.index(obj_xrc)
        if obj.klass == 'spacer':
            w = obj.properties.get('width', '0')
            h = obj.properties.get('height', '0')
            obj_xrc = SpacerXrcObject('%s, %s' % (w, h), str(option), str(flag), str(border))
            sizer.xrc.children.append(obj_xrc)
        else:
            sizeritem_xrc = SizerItemXrcObject(obj_xrc, str(option), str(flag), str(border))
            sizer.xrc.children.append(sizeritem_xrc)
        del top_xrc.children[index]

    def add_class(self, code_obj):
        """\
        Add class behaves very differently for XRC output than for other
        languages (i.e. python): since custom classes are not supported in
        XRC, this has effect only for true toplevel widgets, i.e. frames and
        dialogs. For other kinds of widgets, this is equivalent to add_object
        """
        if not self.xrc_objects.has_key(code_obj):
            builder = self.obj_builders.get(code_obj.base, DefaultXrcObject)
            xrc_obj = builder(code_obj)
            code_obj.xrc = xrc_obj
            # add the xrc_obj to the dict of the toplevel ones
            self.xrc_objects[code_obj] = xrc_obj

    def generate_code_id(self, obj, id=None):
        return '', ''

    def _format_comment(self, msg):
        return '<!-- %s -->' % escape(msg.rstrip())

    def _quote_str(self, s):
        return s
class Controller(object):
    def __init__(self, args={}):
        self.args = args  # arguments from command line
        self.config = {}  # config to be processed from .dexy files
        self.docs = []
        self.timing = []
        self.virtual_docs = []

        self.batch_start_time = None
        self.batch_finish_time = None
        self.batch_elapsed_time = None

        # Set up logging
        if args.has_key("logsdir") and args.has_key("logfile"):
            self.log = dexy.utils.get_log("dexy.controller", args['logsdir'], args['logfile'], args['loglevel'])
        else:
            self.log = Constants.NULL_LOGGER

        # Set up db
        if args.has_key('dbclass') and args.has_key("logsdir") and args.has_key("dbfile"):
            self.db = dexy.utils.get_db(self.args['dbclass'], logsdir=self.args['logsdir'], dbfile=args['dbfile'])
        else:
            self.db = None

        # List of directories that reporters use, these will not be processed by dexy
        self.reports_dirs = dexy.introspect.reports_dirs(self.log)

        # list of artifact classes - if nothing else uses this then move
        # it into the if statement below and don't cache it
        self.artifact_classes = dexy.introspect.artifact_classes(self.log)
        if args.has_key('artifactclass'):
            if self.artifact_classes.has_key(args['artifactclass']):
                self.artifact_class = self.artifact_classes[args['artifactclass']]
            else:
                raise dexy.commands.UserFeedback("Artifact class name %s not found in %s" % (
                    args['artifactclass'], ",".join(self.artifact_classes.keys())))

    def run(self):
        """
        This does all the work.
        """
        self.batch_start_time = time.time()
        start = self.batch_start_time

        self.log.debug("populating Document class filter list")
        dexy.document.Document.filter_list = dexy.introspect.filters(self.log)
        self.timing.append(("populate-filter-list", time.time() - start))

        start = time.time()
        self.log.debug("loading config...")
        self.load_config()
        self.log.debug("finished loading config.")
        self.timing.append(("load-config", time.time() - start))

        start = time.time()
        self.log.debug("processing config, populating document list...")
        self.process_config()
        self.log.debug("finished processing config.")
        self.timing.append(("process-config", time.time() - start))

        start = time.time()
        # set the list of documents which are virtual
        self.virtual_docs = [d for d in self.docs if d.virtual]

        try:
            if not self.args['dryrun']:
                [doc.setup() for doc in self.docs]
                self.docs = [doc.run() for doc in self.docs]
        except dexy.commands.UserFeedback as e:
            self.persist()
            raise e

        self.timing.append(("run-docs", time.time() - start))

        self.batch_finish_time = time.time()
        self.batch_elapsed_time = self.batch_finish_time - self.batch_start_time

        self.log.debug("persisting batch info...")
        self.persist()
        self.log.debug("finished persisting.")

        self.log.debug("finished processing. elapsed time %s" % self.batch_elapsed_time)

    def persist(self):
        """
        Persists the database. Saves some information about this batch in a
        JSON file (for use by reporters or for debugging).
        """
        self.db.persist()
        dexy.utils.save_batch_info(self.batch_id, self.batch_info(), self.args['logsdir'])

    def batch_info(self):
        """
        Dict of info to save
        """
        return {
            "id": self.batch_id,
            "config": self.config,
            "args": self.args,
            "docs": dict((doc.key(), doc.document_info()) for doc in self.docs),
            "start_time": self.batch_start_time,
            "finish_time": self.batch_finish_time,
            "elapsed": self.batch_elapsed_time,
            "timing": self.timing
        }

    def config_for_directory(self, path):
        """
        Determine the config applicable within a directory by looking in every
        parent directory (up as far as the dexy project root) for config files
        and combining them, such that subdirectories override parents.
        """
        self.log.debug("Determining configuration applicable in %s" % path)

        global_args = {}
        config_dict = {}
        variables = {}
        config_file = self.args['config']

        path_elements = path.split(os.sep)

        for i in range(0, len(path_elements) + 1):
            config_path = os.path.join(*(path_elements[0:i] + [config_file]))
            config_files = glob.glob(config_path)

            # Don't propagate virtual files
            for k in config_dict.keys():
                propagate_virtual = config_dict[k].has_key('propagate') and config_dict[k]['propagate']
                if k.startswith("@") and not propagate_virtual:
                    del config_dict[k]

            for f in config_files:
                self.log.info("loading config file %s" % f)

                with open(f, "r") as cf:
                    try:
                        json_dict = json.load(cf)
                    except ValueError as e:
                        msg = "Your config file %s has invalid JSON\n%s" % (f, e.message)
                        raise dexy.commands.UserFeedback(msg)

                if json_dict.has_key("$reset"):
                    # Reset the config, i.e. ignore everything from parent
                    # directories, just use this directory's config in json_dict
                    config_dict = json_dict
                else:
                    # Combine any config in this dir with parent dir config.
                    config_dict.update(json_dict)

                if json_dict.has_key("$globals"):
                    global_args.update(json_dict["$globals"])

                if json_dict.has_key("$variables"):
                    variables.update(json_dict["$variables"])

        config_dict['$globals'] = global_args
        config_dict['$variables'] = variables
        return config_dict

    def load_config(self):
        """
        This method determines which subdirectories will be included in the
        dexy batch and populates the config dict for each of them.
        """
        if self.args['recurse']:
            # Figure out which directories need to be skipped
            exclude_at_root = Constants.EXCLUDE_DIRS_ROOT + self.reports_dirs + \
                [self.args['artifactsdir'], self.args['logsdir']]
            self.log.debug("project root excluded directories %s" % ", ".join(exclude_at_root))

            exclude_everywhere = Constants.EXCLUDE_DIRS_ALL_LEVELS
            self.log.debug("directories excluded at all levels %s" % ", ".join(exclude_everywhere))

            for dirpath, dirnames, filenames in os.walk(self.args['directory']):
                # Figure out if we should process this directory and recurse
                # into its children. Start with process_dir = True
                process_dir = True

                # Remove any children we don't want to recurse into.
                if dirpath == ".":
                    for x in exclude_at_root:
                        if x in dirnames:
                            dirnames.remove(x)
                for x in exclude_everywhere:
                    if x in dirnames:
                        dirnames.remove(x)

                # Look for a .nodexy file
                if os.path.isfile(os.path.join(dirpath, '.nodexy')):
                    # If we find one...
                    self.log.info(".nodexy file found in %s" % dirpath)

                    # ...remove all child dirs from processing...
                    for i in xrange(len(dirnames)):
                        dirnames.pop()

                    # ...and skip this directory.
                    process_dir = False

                # Check if we match any excludes specified on the command line
                args_exclude = self.args['exclude']
                if isinstance(args_exclude, str):
                    args_exclude = args_exclude.split()

                for pattern in args_exclude:
                    for d in dirnames:
                        m1 = re.match(pattern, d)
                        m2 = re.match("./%s" % pattern, d)
                        m3 = re.match("%s/" % pattern, d)
                        m4 = re.match("./%s/" % pattern, d)
                        if m1 or m2 or m3 or m4:
                            dirnames.remove(d)

                if process_dir:
                    self.config[dirpath] = self.config_for_directory(dirpath)
        else:
            # Not recursing
            dirpath = self.args['directory']
            self.config[dirpath] = self.config_for_directory(dirpath)

    def process_config(self):
        """
        Processes a populated config dict, identifies files to be processed,
        creates Document objects for each, links dependencies and finally does
        topological sort to establish order of batch run.
        """

        # Define the parse_doc nested function which we will call recursively.
        def parse_doc(path, input_directive, args={}):
            # If a specification is nested in a dependency, then input_directive
            # may be a dict. If so, split it into parts before continuing.
            try:
                a, b = input_directive.popitem()
                input_directive = a
                args = b
            except AttributeError:
                pass

            tokens = input_directive.split("|")
            if "/" in tokens[0]:
                raise dexy.commands.UserFeedback("paths not allowed in tokens: %s" % tokens[0])
            if path == '.':
                glob_string = tokens[0]
            else:
                glob_string = os.path.join(re.sub("^\./", "", path), tokens[0])
            filters = tokens[1:]

            docs = []

            # virtual document
            if re.search("@", glob_string):
                virtual = True
                dangerous = any(k in ['url', 'repo', 'path'] for k in args)
                if dangerous and not self.args['danger']:
                    msg = "You are attempting to access a remote file %s." % glob_string
                    msg += " You must specify -danger option to do this.\n"
                    raise dexy.commands.UserFeedback(msg)
                glob_string = glob_string.replace("@", "")
            else:
                virtual = False

            regex = fnmatch.translate(glob_string).replace(".*", "(.*)")
            matcher = re.compile(regex)

            files = glob.glob(glob_string)
            nofiles = len(files) == 0

            if nofiles and virtual:
                files = [glob_string]

            for f in files:
                create = True
                if not virtual:
                    if os.path.isdir(f):
                        create = False

                if args.has_key('disabled'):
                    if args['disabled']:
                        create = False
                        self.log.warn("document %s|%s disabled" % (f, "|".join(filters)))

                inputs = []
                if args.has_key('inputs'):
                    if isinstance(args['inputs'], str) or isinstance(args['inputs'], unicode):
                        raise dexy.commands.UserFeedback("inputs for %s should be an array" % f)
                    for i in args['inputs']:
                        # Create document objects for input patterns (just in this directory)
                        for doc in parse_doc(path, i):
                            inputs.append(doc.key())

                m = matcher.match(f)
                if m and len(m.groups()) > 0:
                    rootname = matcher.match(f).group(1)

                # The 'ifinput' directive says that if an input exists matching
                # the specified pattern, we should create this document and it
                # will depend on the specified input.
                if args.has_key('ifinput'):
                    if isinstance(args['ifinput'], str) or isinstance(args['ifinput'], unicode):
                        ifinputs = [args['ifinput']]
                    else:
                        self.log.debug("treating input %s as iterable. class: %s" % (
                            args['ifinput'], args['ifinput'].__class__.__name__))
                        ifinputs = args['ifinput']

                    for s in ifinputs:
                        self.log.debug("evaluating ifinput %s" % s)
                        ifinput = s.replace("%", rootname)
                        self.log.debug("evaluating ifinput %s" % ifinput)
                        input_docs = parse_doc(path, ifinput, {})
                        for input_doc in input_docs:
                            inputs.append(input_doc.key())

                    if len(input_docs) == 0:
                        create = False

                if args.has_key('ifnoinput'):
                    ifinput = args['ifnoinput'].replace("%", rootname)
                    input_docs = parse_doc(path, ifinput, {})

                    if len(input_docs) > 0:
                        create = False

                if args.has_key('except'):
                    try:
                        except_re = re.compile(args['except'])
                    except sre_constants.error as e:
                        raise dexy.commands.UserFeedback("""You passed 'except' value of %s.
Please pass a valid Python-style regular expression for 'except', NOT a
glob-style matcher. Error message from re.compile: %s""" % (args['except'], e))
                    if re.match(except_re, f):
                        self.log.warn("skipping %s for %s as it matches except pattern %s" % (
                            f, input_directive, args['except']))
                        create = False

                if create:
                    doc = dexy.document.Document()
                    doc.set_controller(self)

                    # Filters can either be included in the name...
                    doc.set_name_and_filters(f, filters)
                    # ...or they may be listed explicitly.
                    if args.has_key('filters'):
                        doc.filters += args['filters']

                    if args.has_key('loglevel'):
                        doc.loglevelname = args['loglevel']
                    doc.setup_log()  # After name has been set
                    doc.virtual = virtual

                    key = doc.key()
                    self.log.debug("creating doc %s for glob %s" % (key, glob_string))

                    if self.members.has_key(key):
                        doc = self.members[key]

                    if args.has_key('priority'):
                        doc.priority = args['priority']
                        del args['priority']

                    doc.args.update(args)

                    if args.has_key('allinputs'):
                        doc.use_all_inputs = args['allinputs']

                    if args.has_key('inputs'):
                        doc.input_args = copy.copy(args['inputs'])
                        doc.input_keys = []

                    for i in inputs:
                        doc.add_input_key(i)

                    self.members[key] = doc
                    docs.append(doc)  # docs is a local list of docs

            return docs
        # end of parse_doc nested function

        def get_pos(member):
            key = member.key()
            return self.members.keys().index(key)

        def depend(parent, child):
            self.depends.append((get_pos(child), get_pos(parent)))

        # The real processing starts here.
        self.members = OrderedDict()
        self.depends = []
        self.batch_id = self.db.next_batch_id()

        if not self.args['silent']:
            print "batch id is", self.batch_id

        for path, config in self.config.iteritems():
            ### @export "features-global-args-1"
            if config.has_key("$globals"):
                global_args = config["$globals"]
            else:
                global_args = {}

            if config.has_key("$variables"):
                global_variables = config["$variables"]
            else:
                global_variables = {}

            if self.args.has_key('globals'):
                global_args.update(self.args['globals'])

            for k, v in config.iteritems():
                local_args = global_args.copy()
                local_args.update(v)
                local_args['$variables'] = global_variables
                for kg in global_args.keys():
                    if local_args.has_key(kg):
                        if isinstance(local_args[kg], dict):
                            local_args[kg].update(global_args[kg])
                parse_doc(path, k, local_args)
            ### @end

        # Determine dependencies
        total_dependencies = 0
        self.log.debug("Finalizing dependencies between documents...")
        for doc in self.members.values():
            doc.finalize_inputs(self.members)
            total_dependencies += len(doc.inputs)

            for input_doc in doc.inputs:
                depend(doc, input_doc)

            self.log.debug("finalized dependencies for %s" % doc.key())
            if len(doc.inputs) > 10:
                self.log.debug("%s inputs added" % len(doc.inputs))
            elif len(doc.inputs) == 0:
                self.log.debug("no inputs added")
            else:
                self.log.debug("inputs added: %s" % ", ".join(d.key() for d in doc.inputs))

        if len(self.args['run']) > 0:
            # Only run the specified document, and its dependencies.
            new_members = OrderedDict()
            new_depends = []

            def new_get_pos(member):
                key = member.key()
                return new_members.keys().index(key)

            def new_depend(parent, child):
                new_depends.append((new_get_pos(child), new_get_pos(parent)))

            def parse_new_document(d):
                new_members[d.key()] = d
                for input_doc in d.inputs:
                    if not input_doc.key() in new_members.keys():
                        new_members[input_doc.key()] = input_doc
                    new_depend(d, input_doc)
                    parse_new_document(input_doc)

            run_key = self.args['run']
            if self.members.has_key(run_key):
                doc = self.members[run_key]
            else:
                matches = [k for k in self.members.keys() if k.startswith(run_key)]
                matches.sort(key=lambda k: len(self.members[k].inputs))
                doc = self.members[matches[-1]]
            parse_new_document(doc)

            if not self.args['silent']:
                print "limiting members list to %s and its dependencies, %s/%s documents will be run" % (
                    doc.key(), len(new_members), len(self.members))

            self.members = new_members
            self.depends = new_depends

        num_members = len(self.members)
        if num_members > 0:
            dep_ratio = float(total_dependencies) / num_members
        else:
            dep_ratio = None

        if not self.args['silent']:
            print "sorting %s documents into run order, there are %s total dependencies" % (
                num_members, total_dependencies)
            if dep_ratio:
                print "ratio of dependencies to documents is %0.1f" % (dep_ratio)
                if dep_ratio > 10:
                    print "if you are experiencing performance problems:"
                    print "call dexy with -dryrun and inspect logs/batch-XXXX.json to debug dependencies"
                    print "consider using -strictinherit or reducing your use of 'allinputs' "

        try:
            self.log.debug("Beginning topological sort...")
            topsort_ordering = topsort(self.depends)
            self.log.debug("Topological sort completed successfully.")
        except CycleError as e:
            print "There are circular dependencies!"
            answer, num_parents, children = e.args
            for child, parents in children.items():
                for parent in parents:
                    print "%s depends on %s" % (self.members.keys()[parent], self.members.keys()[child])
            raise dexy.commands.UserFeedback(e.message)

        docs_without_dependencies = frozenset(range(len(self.members))) - frozenset(topsort_ordering)
        self.ordering = topsort_ordering + list(docs_without_dependencies)

        for i in self.ordering:
            key = self.members.keys()[i]
            self.docs.append(self.members[key])
class XRCCodeWriter(BaseCodeWriter): """\ Code writer class for writing XRC XML code out of the designed GUI elements """ default_extensions = ['xrc'] language = "XRC" xrc_objects = None """\ dictionary of active L{XrcObject} instances: during the code generation it stores all the non-sizer objects that have children (i.e. frames, dialogs, panels, notebooks, etc.), while at the end of the code generation, before L{finalize} is called, it contains only the true toplevel objects (frames and dialogs), and is used to write their XML code (see L{finalize}). The other objects are deleted when L{add_object} is called with their corresponding code_object as argument (see L{add_object}) """ global_property_writers = { 'font': FontPropertyHandler, 'events': EventsPropertyHandler, 'extraproperties': ExtraPropertiesPropertyHandler, } """\ Dictionary whose items are custom handlers for widget properties """ property_writers = {} """\ Dictionary of dictionaries of property handlers specific for a widget the keys are the class names of the widgets Example: property_writers['wxRadioBox'] = {'choices', choices_handler} """ obj_builders = {} """\ Dictionary of ``writers'' for the various objects """ tmpl_encoding = '<?xml version="1.0" encoding="%s"?>\n' tmpl_generated_by = '<!-- %(generated_by)s -->' # Nested classes class XrcObject(object): """\ Class to produce the XRC code for a given widget. This is a base class which does nothing """ def __init__(self): self.properties = {} self.children = [] # sub-objects def write_child_prologue(self, child, out_file, ntabs): pass def write_child_epilogue(self, child, out_file, ntabs): pass def write_property(self, name, val, outfile, ntabs): pass def write(self, out_file, ntabs): pass def warning(self, msg): """\ Show a warning message @param msg: Warning message @type msg: String @see: L{common.MessageLogger.warn()} """ common.message.warn(msg) # end of class XrcObject class SizerItemXrcObject(XrcObject): """\ XrcObject to handle sizer items """ def __init__(self, obj, option, flag, border): XRCCodeWriter.XrcObject.__init__(self) self.obj = obj # the XrcObject representing the widget self.option = option self.flag = flag self.border = border def write(self, out_file, ntabs): write = out_file.write write(self.tabs(ntabs) + '<object class="sizeritem">\n') if self.option != '0': write(self.tabs(ntabs + 1) + '<option>%s</option>\n' % \ self.option) if self.flag and self.flag != '0': write(self.tabs(ntabs + 1) + '<flag>%s</flag>\n' % self.flag) if self.border != '0': write(self.tabs(ntabs + 1) + '<border>%s</border>\n' % \ self.border) # write the widget self.obj.write(out_file, ntabs + 1) write(self.tabs(ntabs) + '</object>\n') # end of class SizerItemXrcObject class SpacerXrcObject(XrcObject): """\ XrcObject to handle widgets """ def __init__(self, size_str, option, flag, border): XRCCodeWriter.XrcObject.__init__(self) self.size_str = size_str self.option = option self.flag = flag self.border = border def write(self, out_file, ntabs): write = out_file.write write(self.tabs(ntabs) + '<object class="spacer">\n') write(self.tabs(ntabs + 1) + \ '<size>%s</size>\n' % self.size_str.strip()) if self.option != '0': write(self.tabs(ntabs + 1) + '<option>%s</option>\n' % \ self.option) if self.flag and self.flag != '0': write(self.tabs(ntabs + 1) + '<flag>%s</flag>\n' % self.flag) if self.border != '0': write(self.tabs(ntabs + 1) + '<border>%s</border>\n' % \ self.border) write(self.tabs(ntabs) + '</object>\n') # end of class SpacerXrcObject class DefaultXrcObject(XrcObject): 
"""\ Standard XrcObject for every widget, used if no specific XrcObject is available """ def __init__(self, code_obj): XRCCodeWriter.XrcObject.__init__(self) self.properties = code_obj.properties self.code_obj = code_obj self.name = code_obj.name self.klass = code_obj.base # custom classes aren't allowed in XRC self.subclass = code_obj.klass def write_property(self, name, val, outfile, ntabs): if val: name = escape(name) outfile.write(self.tabs(ntabs) + '<%s>%s</%s>\n' % \ (name, escape(val), name)) def write(self, out_file, ntabs): write = out_file.write if self.code_obj.in_sizers: write(self.tabs(ntabs) + \ '<object class=%s>\n' % quoteattr(self.klass)) else: if self.subclass and self.subclass != self.klass: write(self.tabs(ntabs) + '<object class=%s name=%s subclass=%s>\n' % \ (quoteattr(self.klass), quoteattr(self.name), quoteattr(self.subclass))) else: write(self.tabs(ntabs) + '<object class=%s name=%s>\n' % \ (quoteattr(self.klass), quoteattr(self.name))) tab_str = self.tabs(ntabs + 1) # write the properties if self.properties.has_key('foreground'): if self.properties['foreground'].startswith('#'): # XRC does not support colors from system settings self.properties['fg'] = self.properties['foreground'] del self.properties['foreground'] if self.properties.has_key('background'): if self.properties['background'].startswith('#'): # XRC does not support colors from system settings self.properties['bg'] = self.properties['background'] del self.properties['background'] if self.properties.has_key('font'): font = self.properties['font'] del self.properties['font'] else: font = None style = str(self.properties.get('style', '')) if style and style == '0': del self.properties['style'] if 'id' in self.properties: del self.properties['id'] # id has no meaning for XRC # ALB 2004-12-05 if 'events' in self.properties: #del self.properties['events'] # no event handling in XRC for handler, event in self.properties['events'].iteritems(): write(tab_str + '<handler event=%s>%s</handler>\n' % \ (quoteattr(handler), escape(event))) del self.properties['events'] # 'disabled' property is actually 'enabled' for XRC if 'disabled' in self.properties: try: val = int(self.properties['disabled']) except: val = False if val: self.properties['enabled'] = '0' del self.properties['disabled'] # ALB 2007-08-31 extracode property if 'extracode' in self.properties: write(self.properties['extracode'].replace('\\n', '\n')) del self.properties['extracode'] # custom base classes are ignored for XRC... 
if 'custom_base' in self.properties: del self.properties['custom_base'] if 'extraproperties' in self.properties: prop = self.properties['extraproperties'] del self.properties['extraproperties'] self.properties.update(prop) for name, val in self.properties.iteritems(): self.write_property(str(name), val, out_file, ntabs + 1) # write the font, if present if font: write(tab_str + '<font>\n') tab_str = self.tabs(ntabs + 2) for key, val in font.iteritems(): if val: write(tab_str + '<%s>%s</%s>\n' % \ (escape(key), escape(val), escape(key))) write(self.tabs(ntabs + 1) + '</font>\n') # write the children for c in self.children: self.write_child_prologue(c, out_file, ntabs + 1) c.write(out_file, ntabs + 1) self.write_child_epilogue(c, out_file, ntabs + 1) write(self.tabs(ntabs) + '</object>\n') # end of class DefaultXrcObject class NotImplementedXrcObject(XrcObject): """\ XrcObject used when no code for the widget can be generated (for example, because XRC does not currently handle such widget) """ def __init__(self, code_obj): XRCCodeWriter.XrcObject.__init__(self) self.code_obj = code_obj def write(self, outfile, ntabs): m = 'code generator for %s objects not available' % \ self.code_obj.base self.warning('%s' % m) outfile.write(self.tabs(ntabs) + '<!-- %s -->\n' % m) # end of class NotImplementedXrcObject def __init__(self): BaseCodeWriter.__init__(self) # Inject to all classed derivated from WrcObject XRCCodeWriter.XrcObject.tabs = self.tabs def initialize(self, app_attrs): # initialise parent class BaseCodeWriter.initialize(self, app_attrs) out_path = app_attrs['path'] if self.multiple_files: # for now we handle only single-file code generation raise IOError("XRC code cannot be split into multiple files") self.output_file_name = out_path self.out_file = cStringIO.StringIO() # open(out_path, 'w') self.out_file.write('\n<resource version="2.3.0.1">\n') self.curr_tab = 1 self.xrc_objects = OrderedDict() def finalize(self): # write the code for every toplevel object for obj in self.xrc_objects.itervalues(): obj.write(self.out_file, 1) self.out_file.write('</resource>\n') # store the contents to file self.save_file( self.output_file_name, self.out_file.getvalue() ) def add_app(self, app_attrs, top_win_class): """\ In the case of XRC output, there's no wxApp code to generate """ pass def add_object(self, unused, sub_obj): """\ Adds the object sub_obj to the XRC tree. The first argument is unused. """ # what we need in XRC is not top_obj, but sub_obj's true parent top_obj = sub_obj.parent builder = self.obj_builders.get( sub_obj.base, XRCCodeWriter.DefaultXrcObject ) try: # check whether we already created the xrc_obj xrc_obj = sub_obj.xrc except AttributeError: xrc_obj = builder(sub_obj) # builder functions must return a # subclass of XrcObject sub_obj.xrc = xrc_obj else: # if we found it, remove it from the self.xrc_objects dictionary # (if it was there, i.e. the object is not a sizer), because this # isn't a true toplevel object if sub_obj in self.xrc_objects: del self.xrc_objects[sub_obj] # let's see if sub_obj's parent already has an XrcObject: if so, it is # temporairly stored in the self.xrc_objects dict... 
if top_obj in self.xrc_objects: top_xrc = self.xrc_objects[top_obj] else: # ...otherwise, create it and store it in the self.xrc_objects dict top_xrc = self.obj_builders.get( top_obj.base, XRCCodeWriter.DefaultXrcObject)(top_obj) top_obj.xrc = top_xrc self.xrc_objects[top_obj] = top_xrc top_obj.xrc.children.append(xrc_obj) def add_sizeritem(self, unused, sizer, obj, option, flag, border): """\ Adds a sizeritem to the XRC tree. The first argument is unused. """ # what we need in XRC is not toplevel, but obj's true parent toplevel = obj.parent top_xrc = toplevel.xrc obj_xrc = obj.xrc try: sizer_xrc = sizer.xrc except AttributeError: # if the sizer does not have an XrcObject yet, create it now sizer_xrc = self.obj_builders.get( sizer.base, XRCCodeWriter.DefaultXrcObject)(sizer) sizer.xrc = sizer_xrc # we now have to move the children from 'toplevel' to 'sizer' index = top_xrc.children.index(obj_xrc) if obj.klass == 'spacer': w = obj.properties.get('width', '0') h = obj.properties.get('height', '0') obj_xrc = XRCCodeWriter.SpacerXrcObject( '%s, %s' % (w, h), str(option), str(flag), str(border) ) sizer.xrc.children.append(obj_xrc) else: sizeritem_xrc = XRCCodeWriter.SizerItemXrcObject( obj_xrc, str(option), str(flag), str(border) ) sizer.xrc.children.append(sizeritem_xrc) del top_xrc.children[index] def add_class(self, code_obj): """\ add_class behaves very differently for XRC output than for other languages (e.g. Python): since custom classes are not supported in XRC, this has an effect only for true toplevel widgets, i.e. frames and dialogs. For other kinds of widgets, this is equivalent to add_object. """ if not self.xrc_objects.has_key(code_obj): builder = self.obj_builders.get( code_obj.base, XRCCodeWriter.DefaultXrcObject ) xrc_obj = builder(code_obj) code_obj.xrc = xrc_obj # add the xrc_obj to the dict of the toplevel ones self.xrc_objects[code_obj] = xrc_obj def _format_comment(self, msg): return '<!-- %s -->' % escape(msg.rstrip())
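# Illustrative sketch (not wxGlade code): the nested writers above cooperate to
# produce nested XRC XML -- add_sizeritem() wraps a widget's XrcObject in a
# SizerItemXrcObject, whose write() emits the surrounding
# <object class="sizeritem"> element.  A minimal standalone rendering of that
# layout; FakeWidget and _ListWriter are hypothetical stand-ins.

class _ListWriter(object):
    """File-like accumulator, so the sketch runs on Python 2 and 3 alike."""
    def __init__(self):
        self.chunks = []

    def write(self, text):
        self.chunks.append(text)

    def getvalue(self):
        return "".join(self.chunks)

def _tabs(n):
    return "    " * n

class FakeWidget(object):
    def write(self, out, ntabs):
        out.write(_tabs(ntabs) + '<object class="wxButton" name="button_1">\n')
        out.write(_tabs(ntabs + 1) + '<label>OK</label>\n')
        out.write(_tabs(ntabs) + '</object>\n')

def write_sizeritem(child, out, ntabs, option='1', flag='wxEXPAND', border='5'):
    # Same skeleton as SizerItemXrcObject.write(): emit <option>, <flag> and
    # <border> only when they carry non-default values, then the child object.
    out.write(_tabs(ntabs) + '<object class="sizeritem">\n')
    if option != '0':
        out.write(_tabs(ntabs + 1) + '<option>%s</option>\n' % option)
    if flag and flag != '0':
        out.write(_tabs(ntabs + 1) + '<flag>%s</flag>\n' % flag)
    if border != '0':
        out.write(_tabs(ntabs + 1) + '<border>%s</border>\n' % border)
    child.write(out, ntabs + 1)
    out.write(_tabs(ntabs) + '</object>\n')

_out = _ListWriter()
write_sizeritem(FakeWidget(), _out, 1)
print(_out.getvalue())
# end of XRC illustration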
class Artifact(object): HASH_WHITELIST = Constants.ARTIFACT_HASH_WHITELIST MAX_DATA_DICT_DECIMALS = 5 MAX_DATA_DICT_LENGTH = 10 ** MAX_DATA_DICT_DECIMALS META_ATTRS = [ 'additional_inputs', 'binary_input', 'binary_output', 'created_by', 'document_key', 'ext', 'final', 'hashfunction', 'initial', 'logstream', 'key', 'name', 'output_hash', 'state', 'stdout', 'virtual' ] BINARY_EXTENSIONS = [ '.docx', '.eot', '.epub', '.gif', '.gz', '.jpg', '.kch', '.odt', '.pdf', '.png', '.rtf', '.sqlite', '.sqlite3', '.swf', '.tgz', '.ttf', '.wav', '.woff', '.xls', '.zip' ] def __init__(self): if not hasattr(self.__class__, 'FILTERS'): self.__class__.FILTERS = dexy.introspect.filters(Constants.NULL_LOGGER) self._inputs = {} self.additional = None self.additional_inputs = [] self.args = {} self.args['globals'] = {} self.artifacts_dir = 'artifacts' # TODO don't hard code self.batch_id = None self.batch_order = None self.binary_input = None self.binary_output = None self.controller_args = {} self.controller_args['globals'] = {} self.created_by = None self.ctime = None self.data_dict = OrderedDict() self.dexy_version = Version.VERSION self.dirty = False self.document_key = None self.elapsed = 0 self.ext = None self.final = None self.finish_time = None self.hashfunction = 'md5' self.initial = None self.inode = None self.input_data_dict = OrderedDict() self.is_last = False self.key = None self.log = logging.getLogger() self.logstream = "" self.mtime = None self.name = None self.source = None self.start_time = None self.state = 'new' self.stdout = None self.virtual_docs = None def keys(self): return self.data_dict.keys() def may_have_kv_storage(self): return self.binary_output and (self.ext in dexy.helpers.KeyValueData.EXTENSIONS) def __getitem__(self, key): if not hasattr(self, "_storage") and self.binary_output and (self.ext in dexy.helpers.KeyValueData.EXTENSIONS): self.setup_kv_storage() if hasattr(self, "_storage"): if self._storage.mode == "write": # Change from write mode to read mode... self.setup_kv_storage() return self._storage.retrieve(key) elif self.data_dict.has_key(key): return self.data_dict[key] elif hasattr(self, key): return getattr(self, key) elif self.ext in dexy.helpers.KeyValueData.EXTENSIONS: self.setup_kv_storage() return self._storage.retrieve(key) else: raise dexy.commands.UserFeedback("Can't find key '%s' in %s" % (key, self.key)) def __unicode__(self): """ When d[key] is used without attributes being accessed, need to return artifact output text. Jinja calls the __unicode__ method so we override that. """ return self.output_text() def is_complete(self): return str(self.state) == 'complete' @classmethod def retrieve(klass, hashstring, hashfunction='md5'): if not hasattr(klass, 'retrieved_artifacts'): klass.retrieved_artifacts = {} if klass.retrieved_artifacts.has_key(hashstring): return klass.retrieved_artifacts[hashstring] else: artifact = klass() artifact.hashstring = hashstring artifact.hashfunction = hashfunction artifact.load() klass.retrieved_artifacts[hashstring] = artifact return artifact def load(self): self.load_meta() self.load_input() if self.is_complete() and not self.is_loaded(): self.load_output() def load_inputs(self): for a in self.inputs(): a.load() def save(self): if self.is_abstract(): pass # For testing. 
elif not self.hashstring: raise Exception("can't persist an artifact without a hashstring!") else: self.save_meta() if self.is_complete() and not self.is_output_cached(): try: self.save_output() except IOError as e: print "An error occured while saving %s" % self.key raise e def is_abstract(self): return not hasattr(self, 'save_meta') def filter_args(self): """ Returns args specified in the .dexy file for this filter alias. """ args = {} for a in self.filter_class.ALIASES: if self.args.has_key(a): try: args.update(self.args[a]) except ValueError as e: if "dictionary update sequence element" in e.message: raise dexy.commands.UserFeedback("You need to supply a dict to argument '%s', rather than the single value '%s'" % (a, self.args[a])) else: print self.args[a] raise e return args def setup_initial(self): """ Set up an initial artifact (the first artifact in a document's filter chain). """ if self.args.has_key('binary'): self.binary_input = self.args['binary'] else: self.binary_input = (self.doc.ext in self.BINARY_EXTENSIONS) self.binary_output = self.binary_input self.ext = self.doc.ext self.initial = True self.virtual = self.doc.virtual self.virtual_docs = self.doc.virtual_docs if self.args.has_key('final'): self.final = self.args['final'] elif os.path.basename(self.name).startswith("_"): self.final = False if not self.doc.virtual: stat_info = os.stat(self.name) self.ctime = stat_info[stat.ST_CTIME] self.mtime = stat_info[stat.ST_MTIME] self.inode = stat_info[stat.ST_INO] self.set_data(self.doc.initial_artifact_data()) # TODO remove? if not self.data_dict: raise Exception("no data dict!") elif len(self.data_dict) == 0: raise Exception("data dict has len 0!") self.state = 'complete' def setup_from_filter_class(self): # cache filter class source code so it only has to be calculated once filter_class_source_const = "SOURCE_CODE_%s" % self.filter_class.__name__ if not hasattr(self.filter_class, filter_class_source_const): # get source code of this filter class + all parent filter classes. source = "" klass = self.filter_class # get source code from filter class and all parent classes while klass != dexy.dexy_filter.DexyFilter: source += inspect.getsource(klass) klass = klass.__base__ # and then get source code of DexyFilter class source += inspect.getsource(dexy.dexy_filter.DexyFilter) filter_class_source_hash = self.compute_hash(source) setattr(self.filter_class, filter_class_source_const, filter_class_source_hash) assert filter_class_source_hash == getattr(self.filter_class, filter_class_source_const) self.log.debug("Source code hash for %s is %s" % (self.filter_class.__name__, filter_class_source_hash)) if not hasattr(self.filter_class, 'VERSION'): filter_version = self.filter_class.version(self.log) self.filter_class.VERSION = filter_version self.filter_name = self.filter_class.__name__ self.filter_source = getattr(self.filter_class, filter_class_source_const) self.filter_version = self.filter_class.VERSION if self.final is None: self.final = self.filter_class.FINAL def setup_from_previous_artifact(self, previous_artifact): for a in ['args', 'final', 'mtime', 'ctime', 'inode', 'virtual', 'virtual_docs']: setattr(self, a, getattr(previous_artifact, a)) # Look for additional inputs in previous artifacts or previous # artifacts' inputs. for k, a in previous_artifact.inputs().iteritems(): if a.additional and not k in self._inputs: self.add_input(k, a) elif not k in self._inputs and not a.virtual: # We should have all other inputs already. Validate this. 
raise Exception("Missing input %s" % k) for kk, aa in a.inputs().iteritems(): if aa.additional and not kk in self._inputs: self.add_input(kk, aa) self.binary_input = previous_artifact.binary_output self.input_data_dict = previous_artifact.data_dict self.input_ext = previous_artifact.ext self.previous_artifact_hashstring = previous_artifact.hashstring self.previous_artifact_filename = previous_artifact.filename() self.previous_artifact_filepath = previous_artifact.filepath() self.previous_canonical_filename = previous_artifact.canonical_filename(True) self.previous_long_canonical_filename = previous_artifact.long_canonical_filename() self.previous_websafe_key = previous_artifact.websafe_key() # The JSON output of previous artifact if not previous_artifact.binary_output: self.previous_cached_output_filepath = previous_artifact.cached_output_filepath() # Determine file extension of output if hasattr(self, 'next_filter_class'): next_inputs = self.next_filter_class.INPUT_EXTENSIONS else: next_inputs = None if self.filter_args().has_key('ext'): ext = self.filter_args()['ext'] if not ext.startswith("."): ext = ".%s" % ext self.ext = ext else: self.ext = self.filter_class.output_file_extension( previous_artifact.ext, self.name, next_inputs) self.binary_output = self.filter_class.BINARY if self.binary_output is None: self.set_binary_from_ext() self.state = 'setup' @classmethod def setup(klass, doc, artifact_key, filter_class = None, previous_artifact = None): """ Create an Artifact instance and load all information needed to calculate its hashstring. """ artifact = klass() artifact.key = artifact_key artifact.filter_class = filter_class artifact.is_last = (artifact.key == doc.key()) # Add references for convenience artifact.artifacts_dir = doc.artifacts_dir artifact.controller_args = doc.controller.args artifact.hashfunction = doc.controller.args['hashfunction'] artifact.db = doc.db artifact.doc = doc artifact.log = doc.log # These attributes are the same for all artifacts pertaining to a document artifact.args = doc.args artifact.batch_id = doc.batch_id artifact.document_key = doc.key() artifact.name = doc.name # Set batch order to next in sequence artifact.batch_order = artifact.db.next_batch_order(artifact.batch_id) next_filter_class = doc.next_filter_class() if next_filter_class: artifact.next_filter_name = next_filter_class.__name__ artifact.next_filter_class = next_filter_class # Set inputs from original document inputs. artifact._inputs.update(artifact.doc.input_artifacts()) if len(artifact.doc.input_artifacts().keys()) > 10: doc.log.debug("Setting inputs to include %s document inputs" % len(artifact.doc.input_artifacts())) elif len(artifact.doc.input_artifacts().keys()) > 0: doc.log.debug("Setting inputs to include inputs: %s" % ",".join(artifact.doc.input_artifacts().keys())) for k, a in artifact.doc.input_artifacts().iteritems(): if a.additional and not k in artifact._inputs: doc.log.debug("Adding additional input %s" % k) artifact.add_input(k, a) for kk, aa in a.inputs().iteritems(): if aa.additional and not kk in artifact._inputs: doc.log.debug("Adding additional input %s" % kk) artifact.add_input(kk, aa) if previous_artifact: artifact.setup_from_previous_artifact(previous_artifact) artifact.setup_from_filter_class() else: artifact.setup_initial() artifact.set_hashstring() return artifact def run(self): start = time.time() if self.controller_args['nocache'] or not self.is_complete(): # We have to actually run things... 
if not self.filter_class: self.filter_class = dexy.introspect.get_filter_by_name(self.filter_name, self.doc.__class__.filter_list) # Set up instance of filter. filter_instance = self.filter_class() filter_instance.artifact = self filter_instance.log = self.log # Make sure previous artifact is loaded. if not self.binary_input and len(self.input_text()) == 0: f = open(self.previous_artifact_filepath, "rb") self.data_dict['1'] = f.read() f.close() try: filter_instance.process() except dexy.commands.UserFeedback as e: messages = [] err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters)) messages.append("ERROR in %s (in filter '%s' - step %s of %s)" % err_msg_args) messages.append(e.message) for message in messages: self.log.debug(message) messages.append("This exception information has been written to logs/dexy.log") messages.append("There may be more information in logs/dexy.log") if self.log.getEffectiveLevel() > logging.DEBUG: messages.append("If you can't find clues in the log, try running again with -loglevel DEBUG") raise dexy.commands.UserFeedback("\n".join(messages)) except dexy.commands.InternalDexyProblem as e: err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters)) sys.stderr.write("ERROR in %s (in filter '%s' - step %s of %s)\n" % err_msg_args) raise e except Exception as e: traceback.print_tb(sys.exc_info()[2]) err_msg_args = (self.doc.key(), self.filter_alias, self.doc.step, len(self.doc.filters)) sys.stderr.write("ERROR in %s (in filter '%s' - step %s of %s)\n" % err_msg_args) if e.message: raise dexy.commands.InternalDexyProblem("error class: %s\nerror message: %s" % (e.__class__.__name__, e.message)) else: raise dexy.commands.InternalDexyProblem("error class: %s" % e.__class__.__name__) if self.data_dict and len(self.data_dict) > 0: pass elif self.is_canonical_output_cached: self.state = 'complete' self.save() else: raise Exception("data neither in memory nor on disk") self.logstream = self.doc.logstream.getvalue() self.state = 'complete' self.source = 'run' self.save() else: self.source = 'cache' self.log.debug("using cached artifact for %s" % self.key) # make sure additional artifacts are added to db for a in self.inputs().values(): if a.additional and not a.key in self.db.extra_keys: a.batch_id = self.batch_id self.db.append_artifact(a) self.elapsed = time.time() - start self.db.update_artifact(self) def add_additional_artifact(self, key_with_ext, ext=None): if not ext: ext = os.path.splitext(key_with_ext)[1] new_artifact = self.__class__() new_artifact.key = key_with_ext if ext.startswith("."): new_artifact.ext = ext else: new_artifact.ext = ".%s" % ext new_artifact.final = True new_artifact.hashfunction = self.hashfunction new_artifact.additional = True new_artifact.set_binary_from_ext() new_artifact.artifacts_dir = self.artifacts_dir new_artifact.inode = self.hashstring new_artifact.created_by = self.key new_artifact.virtual = True new_artifact.name = key_with_ext.split("|")[0] # TODO this is duplicated in setup_from_previous_artifact, should reorganize for at in ['batch_id', 'document_key', 'mtime', 'ctime', 'virtual_docs']: val = getattr(self, at) setattr(new_artifact, at, val) new_artifact.set_hashstring() self.log.debug("new artifact %s hashstring %s" % (key_with_ext, new_artifact.hashstring)) self.add_input(key_with_ext, new_artifact) self.db.append_artifact(new_artifact) # append to db because not part of doc.artifacts return new_artifact def add_input(self, key, artifact): self._inputs[key] = 
artifact self.additional_inputs.append(artifact.hashstring) def inputs(self): return self._inputs def set_binary_from_ext(self): # TODO list more binary extensions or find better way to do this if self.ext in self.BINARY_EXTENSIONS: self.binary_output = True else: self.binary_output = False def set_data(self, data): self.data_dict['1'] = data def set_data_from_artifact(self): f = codecs.open(self.filepath(), "r", encoding="utf-8") self.data_dict['1'] = f.read() def is_loaded(self): return hasattr(self, 'data_dict') and len(self.data_dict) > 0 def compute_hash(self, text): unicode_text = None if type(text) == unicode: unicode_text = text elif type(text) in [dict, list]: unicode_text = json.dumps(text) elif self.binary_input: pass else: unicode_text = unicode(text, encoding="utf-8") if unicode_text: text = unicode_text.encode("utf-8") if self.hashfunction == 'md5': h = hashlib.md5(text).hexdigest() elif self.hashfunction == 'sha1': h = hashlib.sha1(text).hexdigest() elif self.hashfunction == 'sha224': h = hashlib.sha224(text).hexdigest() elif self.hashfunction == 'sha256': h = hashlib.sha256(text).hexdigest() elif self.hashfunction == 'sha384': h = hashlib.sha384(text).hexdigest() elif self.hashfunction == 'sha512': h = hashlib.sha512(text).hexdigest() elif self.hashfunction == 'crc32': h = str(zlib.crc32(text) & 0xffffffff) elif self.hashfunction == 'adler32': h = str(zlib.adler32(text) & 0xffffffff) else: raise Exception("unexpected hash function %s" % self.hashfunction) return h def input_hashes(self): """ Returns an OrderedDict of key, hashstring for each input artifact, sorted by key. """ return OrderedDict((k, str(self.inputs()[k].hashstring)) for k in sorted(self.inputs())) def hash_dict(self): """ Calculate and cache the elements used to compute the hashstring """ if not hasattr(self.__class__, 'SOURCE_CODE'): artifact_class_source = inspect.getsource(self.__class__) artifact_py_source = inspect.getsource(Artifact) self.__class__.SOURCE_CODE = self.compute_hash(artifact_class_source + artifact_py_source) self.artifact_class_source = self.__class__.SOURCE_CODE if self.dirty: self.dirty_string = time.gmtime() hash_dict = OrderedDict() hash_dict['inputs'] = self.input_hashes() for k in self.HASH_WHITELIST: if self.__dict__.has_key(k): v = self.__dict__[k] if hasattr(v, 'items'): hash_v = OrderedDict() for k1 in sorted(v.keys()): v1 = v[k1] try: if len(str(v1)) > 50: raise Exception() json.dumps(v1) hash_v[str(k1)] = v1 except Exception: # Use a hash if we will have problems saving to JSON # or if the data is large (don't want to clutter up the DB, # makes it harder to spot differences) hash_v[str(k1)] = self.compute_hash(v1) else: hash_v = str(v) hash_dict[str(k)] = hash_v return hash_dict def set_hashstring(self): if hasattr(self, 'hashstring'): raise Exception("setting hashstring twice") hash_data = str(self.hash_dict()) self.hashstring = self.compute_hash(hash_data) try: original_document_key = self.document_key if not self.is_loaded(): self.load() self.document_key = original_document_key except AttributeError as e: if not self.is_abstract(): raise e except IOError as e: self.save_meta() def convert_if_not_unicode(self, s): if type(s) == unicode: return s elif s == None: return u"" else: try: ut = unicode(s, encoding="utf-8") return ut except Exception as e: print "error occurred trying to convert text to unicode in", self.key raise e def input_text(self): return u"".join([self.convert_if_not_unicode(v) for k, v in self.input_data_dict.items()]) def output_text(self): return 
u"".join([self.convert_if_not_unicode(v) for k, v in self.data_dict.items()]) def read_binary_output(self): self.binary_output = True self.load_output() self.binary_output = False return self.binary_data def output(self): if not self.is_complete(): raise Exception("can't call output unless complete!") if self.binary_output: if not hasattr(self, 'binary_data'): self.load_output() return self.binary_data else: return self.output_text() def relative_refs(self, relative_to_file): """How to refer to this artifact, relative to another.""" doc_dir = os.path.dirname(relative_to_file) return [ os.path.relpath(self.key, doc_dir), os.path.relpath(self.long_canonical_filename(), doc_dir), "/%s" % self.key, "/%s" % self.long_canonical_filename() ] def use_canonical_filename(self): """Returns the canonical filename after saving contents under this name in the artifacts directory.""" self.write_to_file(os.path.join(self.artifacts_dir, self.canonical_filename())) return self.canonical_filename() def write_to_file(self, filename): dirname = os.path.dirname(filename) if not os.path.exists(dirname) and not dirname == '': os.makedirs(dirname) shutil.copyfile(self.filepath(), filename) def work_filename(self): return "%s.work%s" % (self.hashstring, self.input_ext) def generate_workfile(self, work_filename = None): if not work_filename: work_filename = self.work_filename() work_path = os.path.join(self.artifacts_dir, work_filename) work_file = codecs.open(work_path, "w", encoding="utf-8") work_file.write(self.input_text()) work_file.close() def temp_filename(self, ext): return "%s.work%s" % (self.hashstring, ext) def open_tempfile(self, ext): tempfile_path = os.path.join(self.artifacts_dir, self.temp_filename(ext)) codecs.open(tempfile_path, "w", encoding="utf-8") def temp_dir(self): return os.path.join(self.artifacts_dir, self.hashstring) def create_temp_dir(self, populate=False): tempdir = self.temp_dir() shutil.rmtree(tempdir, ignore_errors=True) os.mkdir(tempdir) if populate: # write all inputs to this directory, under their canonical names for input_artifact in self._inputs.values(): filename = os.path.join(tempdir, input_artifact.canonical_filename()) if os.path.exists(input_artifact.filepath()): input_artifact.write_to_file(filename) self.log.debug("Populating temp dir for %s with %s" % (self.key, filename)) else: self.log.warn("Not populating temp dir for %s with file %s, file does not exist (yet)" % (self.key, filename)) # write the workfile to this directory under its canonical name previous = self.previous_artifact_filepath workfile = os.path.join(tempdir, self.previous_canonical_filename) if not os.path.exists(os.path.dirname(workfile)): os.makedirs(os.path.dirname(workfile)) self.log.debug("Copying %s to %s" % (previous, workfile)) shutil.copyfile(previous, workfile) def alias(self): """ Whether this artifact includes an alias. 
""" aliases = [k for k in self.key.split("|") if k.startswith("-")] if len(aliases) > 0: return aliases[0] def canonical_dir(self, ignore_args = False): return os.path.dirname(self.name) def canonical_basename(self, ignore_args = False): return os.path.basename(self.canonical_filename(ignore_args)) def canonical_filename(self, ignore_args = False): fn = os.path.splitext(self.key.split("|")[0])[0] if self.args.has_key('canonical-name') and not ignore_args: parent_dir = os.path.dirname(fn) return os.path.join(parent_dir, self.args['canonical-name']) elif self.args.has_key('postfix') and not ignore_args: return "%s%s%s" % (fn, self.ext, self.args['postfix']) elif self.alias(): return "%s%s%s" % (fn, self.alias(), self.ext) else: return "%s%s" % (fn, self.ext) def long_canonical_filename(self): if not "|" in self.key: return self.key.replace("|", "-") else: return "%s%s" % (self.key.replace("|", "-"), self.ext) def websafe_key(self): return self.long_canonical_filename().replace("/", "--") def web_safe_document_key(self): # duplicate, remove this alias return self.websafe_key() def filename(self): """ The filename where artifact content is stored, based on the hashstring. """ if not hasattr(self, 'ext'): raise Exception("artifact %s has no ext" % self.key) return "%s%s" % (self.hashstring, self.ext) def filepath(self): """ Full path (including artifacts dir location) to location where artifact content is stored. """ return os.path.join(self.artifacts_dir, self.filename()) def abs_filepath(self): return os.path.abspath(self.filepath()) def breadcrumbs(self): """A list of parent dirs, plus the filename if it's not 'index.html'.""" parent_dirs = os.path.dirname(self.canonical_filename()).split("/") if self.canonical_basename() == "index.html": result = parent_dirs else: result = parent_dirs.append(self.canonical_basename()) if not result: result = [] return result def titleized_name(self): if self.canonical_basename() == "index.html": return self.breadcrumbs()[-1].replace("-"," ").title() else: return os.path.splitext(self.canonical_basename())[0].replace("-"," ").title() def unique_key(self): return "%s:%s:%s" % (self.batch_id, self.document_key, self.key) def websafe_unique_key(self): return self.unique_key().replace("/", "--") def url(self): # TODO test for final return "/%s" % self.canonical_filename() def hyperlink(self, link_text = None): # TODO test for final if not link_text: link_text = self.canonical_basename() return """<a href="%s">%s</a>""" % (self.url(), link_text) def iframe(self, link_text = None, width = "600px", height = "300px"): # TODO test for final args = { 'url' : self.url(), 'hyperlink' : self.hyperlink(link_text), 'width' : width, 'height' : height } return """ <iframe src="%(url)s" width="%(width)s" height="%(height)s" style="border: thin solid gray;"> %(hyperlink)s </iframe> """ % args def img(self): # TODO test for final return """<img src="/%s" alt="Image generated by dexy %s" />""" % (self.canonical_filename(), self.key) def relpath(self, artifact_key): """ Returns relative path from self to other artifact key, e.g. 
for linking to CSS relatively """ artifact = self.inputs()[artifact_key] return os.path.join(self.relative_path_to_input(artifact), artifact.canonical_basename()) def has_sections(self): return (self.data_dict.keys() != ['1']) def relative_path_to_input(self, input_artifact): my_dir = os.path.dirname(self.name) input_dir = os.path.dirname(input_artifact.name) self.log.debug("Calculating relative path between %s and %s" % (self.name, input_artifact.name)) if not my_dir: my_dir = "." if not input_dir: input_dir = "." if my_dir == input_dir: relpath = "" else: relpath = os.path.relpath(input_dir, my_dir) return relpath def relative_key_for_input(self, input_artifact): relpath = self.relative_path_to_input(input_artifact) return os.path.join(relpath, os.path.basename(input_artifact.key)) def convert_numbered_dict_to_ordered_dict(self, numbered_dict): ordered_dict = OrderedDict() for x in sorted(numbered_dict.keys()): k = x.split(":", 1)[1] ordered_dict[k] = numbered_dict[x] return ordered_dict def convert_data_dict_to_numbered_dict(self): if len(self.data_dict) >= self.MAX_DATA_DICT_LENGTH: exception_msg = """Your data dict has %s items, which is greater than the arbitrary limit of %s items. You can increase this limit by changing MAX_DATA_DICT_DECIMALS.""" raise Exception(exception_msg % (len(self.data_dict), self.MAX_DATA_DICT_LENGTH)) data_dict = {} i = -1 for k, v in self.data_dict.iteritems(): i += 1 fmt = "%%0%sd:%%s" % self.MAX_DATA_DICT_DECIMALS data_dict[fmt % (i, k)] = v return data_dict def storage(self, reset=False): if not hasattr(self, "_storage") or reset: # Assume we want KV storage self.setup_kv_storage() return self._storage def key_prefixes(self): return sorted(set(":".join(k.split(":")[:-1]) for k in self.storage().keys())) def kv_storage(self): return self.storage() def row_storage(self): if not hasattr(self, "_storage"): self.setup_row() return self._storage def setup_kv_storage(self): try: self._storage = dexy.helpers.KeyValueData(self.filepath()) except ValueError as e: raise dexy.commands.UserFeedback("Can't get key-value data from %s for %s: %s" % self.filepath(), self.key, e.message) def setup_row_storage(self): self._storage = dexy.helpers.RowData(self.filepath())
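# Illustrative sketch (hypothetical helper, not dexy's actual API): the
# caching scheme above amounts to "hash a stable, ordered serialization of
# everything that could change the output" -- hash_dict() collects the
# whitelisted metadata and set_hashstring() digests it.  The core idea:

import hashlib
from collections import OrderedDict

def hashstring_for(meta, hashfunction='md5'):
    # Sort the keys so the same metadata always yields the same digest,
    # independent of insertion order.
    canonical = OrderedDict(sorted(meta.items()))
    h = hashlib.new(hashfunction)
    h.update(repr(canonical).encode("utf-8"))
    return h.hexdigest()

# Two artifacts with identical inputs and filter versions share a cache slot:
a = hashstring_for({'ext': '.html', 'filter_version': '1.2', 'inputs': {}})
b = hashstring_for({'inputs': {}, 'filter_version': '1.2', 'ext': '.html'})
assert a == b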
class SvxlinkTypeContainer(object): def __init__(self, type_name, section_name, valid_options, data=None): """Type container for Svxlink Types. It serves as an abstract class. We need to check for valid options when setting sections. Instead of copying/pasting the code, this abstract class checks for valid options when setting an item in the class. __dbase__ is an internal representation of key/values with an OrderedDict. If data is provided, __dbase__ is filled with this data. Otherwise, we know that the section is created from scratch, so we add TYPE according to the type_name argument by default. "Data" is a list of tuples because that is the type that ConfigParser returns. For ease of use, it is directly used in this way. Note that svxlink.conf requires UPPERCASE for options in sections. Section names can be arbitrary; however, options are presented upper-case. Whenever you set an option name, it will be converted to uppercase, so it does not matter how you write it. For example: f = SvxlinkTypeNet("foo") f["tcp_PORT"] = 5220 is converted to: f["TCP_PORT"] = 5220 so it's still valid to use as long as the option is present in VALID_OPTIONS """ # TODO: Implement a checker function. Now, we only check for # valid options, not the values themselves. Later, we would need # to check for values so we need to implement a function that # checks them. Additionally, this function will be unique to the # classes that extend SvxlinkTypeContainer. So it should be # optional in __init__() self._VALID_OPTIONS = valid_options self._TYPE_NAME = type_name self._SECTION_NAME = section_name # internal ordered dictionary for storing key/values typical to # section self.__dbase__ = OrderedDict() if data is None: self.__dbase__.update({"TYPE": type_name}) else: # start adding the values from the tuples to __dbase__ for tuple_item in data: self.__check_item_and_update(tuple_item[0], tuple_item[1]) def __check_item_and_update(self, key, val): """Checks the item in VALID_OPTIONS and updates __dbase__ if the option is valid. """ if not key.upper() in self._VALID_OPTIONS: raise ValueError("Option '%s' is not valid for '%s'" % (key, self._SECTION_NAME)) self.__dbase__.update({key.upper(): val}) def __str__(self): return "<SvxlinkType%s: %s>" % (self._TYPE_NAME, self._SECTION_NAME) def __getitem__(self, key): return self.__dbase__.get(key.upper()) def __setitem__(self, key, val): self.__check_item_and_update(key, val) def __eq__(self, other): # compare any object with our section name. return other == self._SECTION_NAME def get_section_name(self): """Returns the section name""" return self._SECTION_NAME def has_option(self, option): """Checks if there is such an option in __dbase__ """ return self.__dbase__.has_key(option.upper()) def items(self): """Returns ConfigParser-compatible output for the items in this section. The output is a list of tuples such as: [("TYPE", "Net"), ("TCP_PORT", 5220)] """ # iterate over __dbase__, skipping any keys that start with _ output = [] for item in self.__dbase__: if not item.startswith("_"): output.append((item, self[item])) return output def is_online(self): """An abstract method for checking if the section is up. This method should be implemented in SvxlinkType objects. By default, it returns True. For example, for a SvxlinkTypeRepeater, the is_online() method can check if the repeater is in the LOGICS option of the GLOBAL section. For a Local device, this method can check if the card is listed by ALSA and can be accessed without a problem. """ return True
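# Usage sketch for the container above (the option list and section name are
# hypothetical): option names are normalised to upper case on every access,
# so mixed-case keys address the same entry.  Note that has_option() relies
# on dict.has_key(), so this runs on Python 2 like the surrounding code.

VALID = ["TYPE", "TCP_PORT", "HOST"]
net = SvxlinkTypeContainer("Net", "RemoteTrx", VALID)
net["tcp_PORT"] = 5220              # stored under TCP_PORT
assert net["TCP_PORT"] == 5220
assert net.has_option("tcp_port")
print(net.items())                  # [('TYPE', 'Net'), ('TCP_PORT', 5220)]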
class Dependency: ## Construct an empty dependency tree # @param self the object reference # @param silent minimal feedback # @param autobuild warn rather than fail on multiple version dependencies. XXX def __init__(self, silent=True, autobuild=False): ## The ASKAP top-level directory self.ASKAPROOT = os.environ.get("ASKAP_ROOT") if self.ASKAPROOT is None: msg = "ASKAP_ROOT environment variable is not defined" raise BuildError(msg) # self.DEPFILE = "dependencies" # The basename of the dependency file self.INSTALL_SUBDIR = "install" self._deps = OrderedDict() # self._bindirs = [] self._incdirs = [] self._libdirs = [] self._rootdirs = [] # self._cppflags = [] # XXX "defs" in package.info. LOFAR/log4cxx # self._env = [] self._jars = [] self._libs = [] self._packages = [] # self._ldlibpath = "" self._pypath = "" # self._autobuild = autobuild self._silent = silent # minimal output self.selfupdate = False # should object request updates from svn def q_print(self, msg): if self._silent: return utils.q_print(msg) ## Get the path of the specified dependency package # @param self the current object # @param key the label of the package dependency # @return the path (relative to ASKAP_ROOT) to the package def get_dep_path(self, key): return self._deps[key]["path"] # Used by the "in" test. # object.__contains__(self, item) # # Called to implement membership test operators. Should return true if item # is in self, false otherwise. For mapping objects, this should consider # the keys of the mapping rather than the values or the key-item pairs. # # For objects that do not define __contains__(), the membership test first # tries iteration via __iter__(), then the old sequence iteration protocol # via __getitem__(), see this section in the language reference. # # http://docs.python.org/reference/datamodel.html def __contains__(self, key): return self._deps.has_key(key) ## Get the absolute path to the dependency package's installed location # @param self the current object # @param key the label of the package dependency # @return the absolute path to the package installed location def get_install_path(self, key): rel_path = self._deps[key]["path"] full_path = os.path.join(self.ASKAPROOT, rel_path, self.INSTALL_SUBDIR) return os.path.abspath(full_path) def get_path(self): return os.path.pathsep.join(self._bindirs) ## Get the libraries retrieved in the dependency analysis # @param self the object reference # @param mapped return (library, installdir) tuples rather than bare names # @return a list of library names def get_libs(self, mapped=False): if mapped: return self._libs[:] else: return [ m[0] for m in self._libs ] ## Get the environment variables retrieved in the dependency analysis # @param self the object reference # @return a dictionary of ENVVAR => value pairs def get_env(self): return dict([i.split("=") for i in self._env]) ## Get the java classpath for the dependencies # @param self the object reference # @return a classpath string of the form x/y/z.jar:a/b/c.jar def get_classpath(self): return os.path.pathsep.join(self._jars) ## Get the root directories of the tags retrieved in the dependency analysis # @param self the object reference # @return a list of directory names def get_rootdirs(self, mapped=False): # XXX used in ant.py builder with mapped=true.
if mapped: return [ (k, os.path.join( self.ASKAPROOT, v['path'])) \ for k,v in self._deps.iteritems()] return self._rootdirs[:] ## Get the LIBRARY directories retrieved in the dependency analysis # @param self the object reference # @param mapped return directory tuples (rootdir, libdir) # @return a list of library directories or tuples of rootdirs and libdirs def get_librarydirs(self, mapped=False): if mapped: return self._libdirs[:] else: return [ m[0] for m in self._libdirs ] ## Get the LD_LIBRARY_PATH accumulated in the dependency analysis # @param self the object reference # @return a string representing the LD_LIBRARY_PATH def get_ld_library_path(self): return self._ldlibpath.strip(":") ## Get the INCLUDE directories retrieved in the dependency analysis # @param self the object reference # @return a list of header file directories def get_includedirs(self): return self._incdirs[:] ## Get the CPPFLAGS retrieved in the dependency analysis # @param self the object reference # @return a list preprocessor flags def get_cppflags(self): return self._cppflags[:] def get_pythonpath(self): return self._pypath.strip(":") ## Get a list of doxygen tag files in the dependencies. This is used for # cross-referencing the documentation # @todo Re-enable: This has been disabled until it is working for python # @param self the object reference # @return a list of TAGFILES entries # XXX used only in scons_tools/askap_package.py def get_tagfiles(self): tagfiles = [] for pth in self._rootdirs: tagname = utils.tag_name(pth) tagpath = os.path.join(pth, tagname) if os.path.exists(tagpath): tagfiles.append('"%s=%s/html"' % (tagpath, pth) ) return tagfiles def _get_dependencies(self, package): codename = utils.get_platform()['codename'] hostname = socket.gethostname().split(".")[0] for ext in ['default', codename, hostname]: if ext: depfile = '%s.%s' % (self.DEPFILE, ext) if package: depfile = os.path.join(self.ASKAPROOT, package, depfile) if self.selfupdate: # always update if it is the "root/target" package basedir = os.path.split(depfile)[0] or "." if not os.path.exists(basedir): utils.update_tree(basedir) self._get_depfile(depfile) def _get_depfile(self, depfile, overwrite=False): if not os.path.exists(depfile): # assume no dependencies return dfh = file(depfile) for line in dfh.readlines(): line = line.strip() if line.startswith("#"): continue kv = line.split("=", 1) if len(kv) == 2: key = kv[0].strip() value = kv[1].strip() # see if the file explicitly names any libs lspl = value.split(";") libs = None if len(lspl) > 1: libs = lspl[1].strip().split() value = lspl[0] self._add_dependency(key, value, libs, overwrite) if not value.startswith("/"): # recurse into ASKAP dependencies # otherwise just move on as we specified system dependency # which will not have a dependency file self._packages.append(value) self._get_dependencies(value) dfh.close() def _get_info(self, packagedir): info = { # A single directory path relative to the install directory. 'bindir': 'bin', 'distdir': 'dist', 'incdir': 'include', 'libdir': 'lib', # Space separated lists. XXX Default should be '[]'? 'defs' : None, 'env': None, 'jars': None, 'libs': None, # Define a single python module name and version. # e.g. 
pymodule=numpy==1.2.0 'pymodule': None, } sslists = ['defs', 'env', 'jars', 'libs'] infofile = os.path.join(packagedir, 'package.info') if os.path.exists(infofile): f = file(infofile) for line in f.readlines(): line = line.strip() if line.startswith("#"): continue kv = line.split("=", 1) if len(kv) == 2: key = kv[0].strip() value = kv[1].strip() if key in info.keys(): if key in sslists: info[key] = value.split() else: info[key] = value f.close() return info def _add_dependency(self, key, value, libs, overwrite=False): if self._deps.has_key(key): # deal with potential symbolic links for 'default' packages paths = [self._deps[key]["path"], value] outpaths = [] for pth in paths: if not pth.startswith("/"): pth = os.path.join(os.environ["ASKAP_ROOT"], pth) pth = os.path.realpath(pth) outpaths.append(pth) if outpaths[0] == outpaths[1]: if libs: if self._deps[key]["libs"] is not None: # prepend the libs self._deps[key]["libs"] = libs + self._deps[key]["libs"] else: self._deps[key]["libs"] = libs self._deps.toend(key) else: # another dependency, so move it to the end, so link # order is correct self._deps.toend(key) return else: if overwrite: self._deps[key]["path"] = value self.q_print("info: Overwriting default package dependency '%s' with host specific package (from %s)" % (key, value) ) elif self._autobuild: # XXX maybe a mistake? self.q_print("warn: Possible multiple version dependency \n\ %s != %s" % (self._deps[key]["path"], value)) else: raise BuildError("Multiple version dependency \n\ %s != %s" % (self._deps[key]["path"], value)) else: self.q_print("info: Adding package dependency '%s' (from %s)" % (key, value)) # now update the dependency itself # XXX only used in Tools/scons_tools/askap_package.py if self.selfupdate: utils.update_tree(value) self._deps[key] = {"path": value, "libs": libs} def _remove_duplicates(self, values): # find unique elements libs = [v[0] for v in values] for k in set(libs): # remove all but last duplicate entry while libs.count(k) > 1: idx = libs.index(k) libs.pop(idx) values.pop(idx) ## Add a ThirdPartyLibrary or ASKAP package to the environment # This will add the package path in ASKAP_ROOT # @param self the object reference # @param pkgname The name of the package as in the repository, e.g. # lapack. Default None means that this is defined in local # dependencies.xyz # @param tag The location of the package, e.g. # 3rdParty/lapack-3.1.1/lapack-3.1.1 # @param libs The name of the libraries to link against, # default None is the same as the pkgname # @param libdir The location of the library dir relative to the package, # default None which will use settings in the package.info file # @param incdir The location of the include dir relative to the package, # default None which will use settings in the package.info file # @param pymodule the 'require' statement to specify this dependency # statement, e.g. 
"askap.loghandlers==current" def add_package(self, pkgname=None, tag=None, libs=None, libdir=None, incdir=None, bindir=None, pymodule=None): self._deps = OrderedDict() if pkgname: if not tag: BuildError("No tag specified") if self.selfupdate: #if not os.path.exists(tag): utils.update_tree(tag) self._add_path(pkgname, self.ASKAPROOT, tag, libs, libdir, incdir, bindir, pymodule) self.q_print("info: Adding package '%s'" % pkgname) if tag: tag = os.path.join(self.ASKAPROOT, tag) self._get_dependencies(tag) parent = '' for key, value in self._deps.iteritems(): self._add_path(key, self.ASKAPROOT, value["path"], libs=value["libs"], parent=parent) parent = value["path"] # Add a ASKAP repository path to the environment # This sets up LIBPATH and CPPPATH def _add_path(self, pkgname, root, tag, parent='', libs=None, libdir=None, incdir=None, bindir=None, pymodule=None): loc = None if tag.startswith("/"): # external package loc = tag else: # ASKAP package or 3rdParty library loc = os.path.join(root, tag) rloc = os.path.relpath(loc, self.ASKAPROOT) if not os.path.exists(loc): raise BuildError("Dependency directory '%s' does not exist (requested by %s)." % (rloc,parent)) self._rootdirs += [loc] info = self._get_info(loc) # get optional package info idir = os.path.join(loc, self.INSTALL_SUBDIR) # actual installion. if not bindir: # add bin directory bindir = info["bindir"] if bindir: # None means disabled in info file pth = os.path.join(idir, bindir) if os.path.exists(pth): self._bindirs += [pth] if not incdir: # add include directory incdir = info["incdir"] if incdir: # None means disabled in info file pth = os.path.join(idir, incdir) if not os.path.exists(pth): if not pymodule: self.q_print("warn: incdir '%s' does not exist." % pth) else: self._incdirs += [pth] if not libdir: # add library directory libdir = info["libdir"] if libdir: # None means disabled in info file pth = os.path.join(idir, libdir) if not os.path.exists(pth): if not pymodule: self.q_print("warn: libdir '%s' does not exist." % pth) else: self._ldlibpath += os.path.pathsep+pth self._libdirs += [(pth, idir)] libs = libs or info["libs"] addlibs = True if isinstance(libs, list) and len(libs) == 0: addlibs = False libs = libs or pkgname if not isinstance(libs, list): libs = [libs] if addlibs: # only add lib if it's not a python module nlibs = [] for lib in libs: instdir = idir if not glob.glob("{0}/lib{1}*".format(os.path.join(idir, libdir), lib)): instdir = "" nlibs.append((lib, instdir)) self._libs += nlibs libs = self._libs[:] # copy self._remove_duplicates(libs) self._libs = libs if info["defs"]: # add package defines self._cppflags += info["defs"] if info["env"]: # add environment variables self._env += info["env"] # check whether it is python, i.e. pymodule entry in package.info if not pymodule: pymodule = info["pymodule"] if pymodule: pth = os.path.join(idir, libdir, utils.get_site_dir()) if self._pypath.find(pth) < 1: self._pypath = os.path.pathsep.join([pth, self._pypath]) if info["jars"]: pth = os.path.join(idir, libdir) if not os.path.exists(pth): if not pymodule: self.q_print("warn: libdir '%s' does not exist." % pth) for jar in info["jars"]: jar = os.path.join(pth, jar) if jar not in self._jars: self._jars.append(jar)
# (fragment of a firewall-policy parsing loop; 'line', 'x', 'policy', 'policy_dict' and 'config_iter' are bound by the enclosing loop)
policy.set_action('permit') if re.search('dst',x[8]) is not None: policy.set_nat('d') elif re.search('src',x[8]) is not None: policy.set_nat('s') if re.search('log',x[8]) is not None: policy.set_log() if re.search('count',x[8]) is not None: policy.set_count() if re.search('no-session-backup',x[8]) is not None: policy.set_nosb() if re.search('webauth',x[8]) is not None: policy.set_webauth() policy.set_action(policy.action + "~") if policy_dict.has_key(policy.id): sys.stderr.write("Duplicate policy id entry detected: %s\n" % policy.id) continue policy_dict[policy.id] = policy continue # marks the beginning of a policy statement with extra configuration items # ['Id',None] x = policy_regex_begin.split(line)[1:-1] if len(x): id = x[0] if x[1] == "disable": policy_dict[id].set_disable() config_iter.next() policy_line = config_iter.next()
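# Sketch (hypothetical helper; input format assumed from the regex calls
# above): the fragment scans the options field -- x[8] in the parser -- for
# keywords with re.search(), which matches the keyword anywhere in the field.
# The same checks, collected in one place:

import re

def policy_flags(options_field):
    flags = {}
    for keyword in ("log", "count", "no-session-backup", "webauth"):
        flags[keyword] = re.search(keyword, options_field) is not None
    if re.search("dst", options_field):
        flags["nat"] = "d"
    elif re.search("src", options_field):
        flags["nat"] = "s"
    return flags

print(policy_flags("nat src log count"))  # log/count True, nat 's'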
class GenerateWebPages(object): def __init__(self, getConfigValue, pageDir, resourceNames, pageTitle, pageSubTitle, pageVersion, extraVersions, descriptionInfo): self.pageTitle = pageTitle self.pageSubTitle = pageSubTitle self.pageVersion = pageVersion self.extraVersions = extraVersions self.pageDir = pageDir self.pagesOverview = OrderedDict() self.pagesDetails = OrderedDict() self.getConfigValue = getConfigValue self.resourceNames = resourceNames self.descriptionInfo = descriptionInfo self.diag = logging.getLogger("GenerateWebPages") def makeSelectors(self, subPageNames, tags=[]): allSelectors = [] firstSubPageName = self.getConfigValue("historical_report_subpages", "default")[0] for subPageName in subPageNames: if subPageName == firstSubPageName: suffix = "" else: suffix = "_" + subPageName.lower() allSelectors.append(Selector(subPageName, suffix, self.getConfigValue, tags)) return allSelectors def generate(self, repositoryDirs, subPageNames): foundMinorVersions = {} allMonthSelectors = set() latestMonth = None pageToGraphs = {} for version, repositoryDirInfo in repositoryDirs.items(): self.diag.info("Generating " + version) allFiles, tags = self.findTestStateFilesAndTags(repositoryDirInfo) if len(allFiles) > 0: selectors = self.makeSelectors(subPageNames, tags) monthSelectors = SelectorByMonth.makeInstances(tags) allMonthSelectors.update(monthSelectors) allSelectors = selectors + list(reversed(monthSelectors)) # If we already have month pages, we only regenerate the current one if len(self.getExistingMonthPages()) == 0: selectors = allSelectors else: currLatestMonthSel = monthSelectors[-1] if latestMonth is None or currLatestMonthSel.linkName == latestMonth: selectors.append(monthSelectors[-1]) latestMonth = currLatestMonthSel.linkName tags = list(reduce(set.union, (set(selector.selectedTags) for selector in selectors), set())) tags.sort(self.compareTags) loggedTests = OrderedDict() categoryHandlers = {} for stateFile, repository in allFiles: tag = self.getTagFromFile(stateFile) if len(tags) == 0 or tag in tags: testId, state, extraVersion = self.processTestStateFile(stateFile, repository) loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, state, extraVersion) versionToShow = self.removePageVersion(version) for resourceName in self.resourceNames: hasData = False for sel in selectors: filePath = self.getPageFilePath(sel, resourceName) if self.pagesOverview.has_key(filePath): _, page, pageColours = self.pagesOverview[filePath] else: page = self.createPage(resourceName) pageColours = set() self.pagesOverview[filePath] = resourceName, page, pageColours for cellInfo in self.getCellInfoForResource(resourceName): tableHeader = self.getTableHeader(resourceName, cellInfo, version, repositoryDirs) heading = self.getHeading(resourceName, versionToShow) hasNewData, graphLink, tableColours = self.addTable(page, cellInfo, categoryHandlers, version, loggedTests, sel, tableHeader, filePath, heading) hasData |= hasNewData pageColours.update(tableColours) if graphLink: pageToGraphs.setdefault(page, []).append(graphLink) if hasData and versionToShow: link = HTMLgen.Href("#" + version, versionToShow) foundMinorVersions.setdefault(resourceName, HTMLgen.Container()).append(link) # put them in reverse order, most relevant first linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ] for tag in tags: details = self.pagesDetails.setdefault(tag, 
TestDetails(tag, self.pageTitle, self.pageSubTitle)) details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview) selContainer = HTMLgen.Container() selectors = self.makeSelectors(subPageNames) for sel in selectors: target, linkName = sel.getLinkInfo(self.pageVersion) selContainer.append(HTMLgen.Href(target, linkName)) monthContainer = HTMLgen.Container() for sel in sorted(allMonthSelectors): target, linkName = sel.getLinkInfo(self.pageVersion) monthContainer.append(HTMLgen.Href(target, linkName)) for resourceName, page, pageColours in self.pagesOverview.values(): if len(monthContainer.contents) > 0: page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center')) graphs = pageToGraphs.get(page) page.prepend(HTMLgen.Heading(2, selContainer, align = 'center')) minorVersionHeader = foundMinorVersions.get(resourceName) if minorVersionHeader: if not graphs is None and len(graphs) > 1: page.prepend(HTMLgen.Heading(1, *graphs, align = 'center')) page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center')) page.prepend(HTMLgen.Heading(1, self.getHeading(resourceName), align = 'center')) if len(pageColours) > 0: page.prepend(HTMLgen.BR()); page.prepend(HTMLgen.BR()); page.script = self.getFilterScripts(pageColours) self.writePages() def getFilterScripts(self, pageColours): finder = ColourFinder(self.getConfigValue) rowHeaderColour = finder.find("row_header_bg") successColour = finder.find("success_bg") # Always put green at the start, we often want to filter that sortedColours = sorted(pageColours, key=lambda c: (c != successColour, c)) scriptCode = "var TEST_ROW_HEADER_COLOR = " + repr(rowHeaderColour) + ";\n" + \ "var Colors = " + repr(sortedColours) + ";" return [ HTMLgen.Script(code=scriptCode), HTMLgen.Script(src="../javascript/jquery.js"), HTMLgen.Script(src="../javascript/filter.js"), HTMLgen.Script(src="../javascript/plugin.js") ] def getHeading(self, resourceName, versionToShow=""): heading = self.getResultType(resourceName) + " results for " + self.pageTitle if versionToShow: heading += "." 
+ versionToShow return heading def getTableHeader(self, resourceName, cellInfo, version, repositoryDirs): parts = [] if resourceName != cellInfo: parts.append(cellInfo.capitalize() + " Results") if len(repositoryDirs) > 1: parts.append(version) return " for ".join(parts) def getCellInfoForResource(self, resourceName): fromConfig = self.getConfigValue("historical_report_resource_page_tables", resourceName) if fromConfig: return fromConfig else: return [ resourceName ] def getResultType(self, resourceName): if resourceName: return resourceName.capitalize() else: return "Test" def getExistingMonthPages(self): return glob(os.path.join(self.pageDir, "test_" + self.pageVersion + "_all_???[0-9][0-9][0-9][0-9].html")) def compareTags(self, x, y): timeCmp = cmp(self.getTagTimeInSeconds(x), self.getTagTimeInSeconds(y)) if timeCmp: return timeCmp elif len(x) != len(y): # If the timing is the same, sort alphabetically # Any number should be sorted numerically, do this by padding them with leading zeroes return cmp(plugins.padNumbersWithZeroes(x), plugins.padNumbersWithZeroes(y)) else: return cmp(x, y) def getTagFromFile(self, fileName): return os.path.basename(fileName).replace("teststate_", "") def findTestStateFilesAndTags(self, repositoryDirs): allFiles = [] allTags = set() for _, dir in repositoryDirs: for root, _, files in os.walk(dir): for file in files: if file.startswith("teststate_"): allFiles.append((os.path.join(root, file), dir)) allTags.add(self.getTagFromFile(file)) return allFiles, sorted(allTags, self.compareTags) def processTestStateFile(self, stateFile, repository): state = self.readState(stateFile) testId = self.getTestIdentifier(stateFile, repository) extraVersion = self.findExtraVersion(repository) return testId, state, extraVersion def findExtraVersion(self, repository): versions = os.path.basename(repository).split(".") for i in xrange(len(versions)): version = ".".join(versions[i:]) if version in self.extraVersions: return version return "" def findGlobal(self, modName, className): exec "from " + modName + " import " + className + " as _class" return _class #@UndefinedVariable def getNewState(self, file): # Would like to do load(file) here... but it doesn't work with universal line endings, see Python bug 1724366 from cStringIO import StringIO unpickler = Unpickler(StringIO(file.read())) # Magic to keep us backward compatible in the face of packages changing... unpickler.find_global = self.findGlobal return unpickler.load() def readState(self, stateFile): file = open(stateFile, "rU") try: state = self.getNewState(file) if isinstance(state, plugins.TestState): return state else: return self.readErrorState("Incorrect type for state object.") except (UnpicklingError, ImportError, EOFError, AttributeError), e: if os.path.getsize(stateFile) > 0: return self.readErrorState("Stack info follows:\n" + str(e)) else: return plugins.Unrunnable("Results file was empty, probably the disk it resides on is full.", "Disk full?")
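# Sketch of the backward-compatibility hook used by getNewState() above: in
# Python 2, a cPickle.Unpickler exposes a writable find_global attribute that
# resolves every module.Class reference in the stream, so classes that have
# been renamed or moved between packages can be redirected on load.
# Hypothetical standalone demo (Python 2, matching the surrounding code):

import sys
import cPickle
from cStringIO import StringIO

class Old(object):
    pass

class New(object):
    pass

def lookup(module, name):
    if name == "Old":
        return New          # the class moved; redirect old pickles to it
    __import__(module)
    return getattr(sys.modules[module], name)

unpickler = cPickle.Unpickler(StringIO(cPickle.dumps(Old())))
unpickler.find_global = lookup
assert isinstance(unpickler.load(), New)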
class GenerateWebPages(object): def __init__(self, getConfigValue, pageDir, resourceNames, pageTitle, pageSubTitles, pageVersion, extraVersions, descriptionInfo): self.pageTitle = pageTitle self.pageSubTitles = pageSubTitles self.pageVersion = pageVersion self.extraVersions = extraVersions self.pageDir = pageDir self.pagesOverview = OrderedDict() self.pagesDetails = OrderedDict() self.getConfigValue = getConfigValue self.resourceNames = resourceNames self.descriptionInfo = descriptionInfo self.diag = logging.getLogger("GenerateWebPages") def makeSelectors(self, subPageNames, tags=[]): allSelectors = [] firstSubPageName = self.getConfigValue("historical_report_subpages", "default")[0] for subPageName in subPageNames: if subPageName == firstSubPageName: suffix = "" else: suffix = "_" + subPageName.lower() allSelectors.append(Selector(subPageName, suffix, self.getConfigValue, tags)) return allSelectors def removeUnused(self, unused, tagData): successTags = {} for tag in unused: for fn in tagData.get(tag): if os.path.basename(fn).startswith("teststate_"): os.remove(fn) else: successTags.setdefault(fn, []).append(tag) for fn, tagsToRemove in successTags.items(): linesToKeep = [] with open(fn) as readFile: for line in readFile: tag = line.strip().split()[0] if tag not in tagsToRemove: linesToKeep.append(line) with open(fn, "w") as writeFile: for line in linesToKeep: writeFile.write(line) def generate(self, repositoryDirs, subPageNames, archiveUnused): minorVersionHeader = HTMLgen.Container() allMonthSelectors = set() latestMonth = None pageToGraphs = {} for version, repositoryDirInfo in repositoryDirs.items(): self.diag.info("Generating " + version) tagData, stateFiles, successFiles = self.findTestStateFilesAndTags(repositoryDirInfo) if len(stateFiles) > 0 or len(successFiles) > 0: tags = tagData.keys() tags.sort(self.compareTags) selectors = self.makeSelectors(subPageNames, tags) monthSelectors = SelectorByMonth.makeInstances(tags) allMonthSelectors.update(monthSelectors) allSelectors = selectors + list(reversed(monthSelectors)) # If we already have month pages, we only regenerate the current one if len(self.getExistingMonthPages()) == 0: selectors = allSelectors else: currLatestMonthSel = monthSelectors[-1] if latestMonth is None or currLatestMonthSel.linkName == latestMonth: selectors.append(monthSelectors[-1]) latestMonth = currLatestMonthSel.linkName selectedTags = set() unusedTags = set(tags) for selector in selectors: currTags = set(selector.selectedTags) selectedTags.update(currTags) if archiveUnused: unusedTags.difference_update(currTags) tags = filter(lambda t: t in selectedTags, tags) if archiveUnused and unusedTags: plugins.log.info("Automatic repository cleaning will now remove old data for the following runs:") for tag in sorted(unusedTags, self.compareTags): plugins.log.info("- " + tag) plugins.log.info("(To disable automatic repository cleaning in future, please run with the --manualarchive flag when collating the HTML report.)") self.removeUnused(unusedTags, tagData) loggedTests = OrderedDict() categoryHandlers = {} self.diag.info("Processing " + str(len(stateFiles)) + " teststate files") relevantFiles = 0 for stateFile, repository in stateFiles: tag = self.getTagFromFile(stateFile) if len(tags) == 0 or tag in tags: relevantFiles += 1 testId, state, extraVersion = self.processTestStateFile(stateFile, repository) loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = state categoryHandlers.setdefault(tag, 
CategoryHandler()).registerInCategory(testId, state.category, extraVersion, state) if relevantFiles % 100 == 0: self.diag.info("- Processed " + str(relevantFiles) + " files with matching tags so far") self.diag.info("Processed " + str(relevantFiles) + " relevant teststate files") self.diag.info("Processing " + str(len(successFiles)) + " success files") for successFile, repository in successFiles: testId = self.getTestIdentifier(successFile, repository) extraVersion = self.findExtraVersion(repository) with open(successFile) as f: fileTags = set() for line in f: parts = line.strip().split(" ", 1) if len(parts) != 2: continue tag, text = parts if tag in fileTags: sys.stderr.write("WARNING: more than one result present for tag '" + tag + "' in file " + successFile + "!\n") sys.stderr.write("Ignoring later ones\n") continue fileTags.add(tag) if len(tags) == 0 or tag in tags: loggedTests.setdefault(extraVersion, OrderedDict()).setdefault(testId, OrderedDict())[tag] = text categoryHandlers.setdefault(tag, CategoryHandler()).registerInCategory(testId, "success", extraVersion, text) self.diag.info("Processed " + str(len(successFiles)) + " success files") versionToShow = self.removePageVersion(version) hasData = False for sel in selectors: filePath = self.getPageFilePath(sel) if self.pagesOverview.has_key(filePath): page, pageColours = self.pagesOverview[filePath] else: page = self.createPage() pageColours = set() self.pagesOverview[filePath] = page, pageColours tableHeader = self.getTableHeader(version, repositoryDirs) heading = self.getHeading(versionToShow) hasNewData, graphLink, tableColours = self.addTable(page, self.resourceNames, categoryHandlers, version, loggedTests, sel, tableHeader, filePath, heading, repositoryDirInfo) hasData |= hasNewData pageColours.update(tableColours) if graphLink: pageToGraphs.setdefault(page, []).append(graphLink) if hasData and versionToShow: link = HTMLgen.Href("#" + version, versionToShow) minorVersionHeader.append(link) # put them in reverse order, most relevant first linkFromDetailsToOverview = [ sel.getLinkInfo(self.pageVersion) for sel in allSelectors ] for tag in tags: details = self.pagesDetails.setdefault(tag, TestDetails(tag, self.pageTitle, self.pageSubTitles)) details.addVersionSection(version, categoryHandlers[tag], linkFromDetailsToOverview) selContainer = HTMLgen.Container() selectors = self.makeSelectors(subPageNames) for sel in selectors: target, linkName = sel.getLinkInfo(self.pageVersion) selContainer.append(HTMLgen.Href(target, linkName)) monthContainer = HTMLgen.Container() if len(allMonthSelectors) == 1: # Don't want just one month, no navigation possible prevMonth = list(allMonthSelectors)[0].getPreviousMonthSelector() allMonthSelectors.add(prevMonth) for sel in sorted(allMonthSelectors): target, linkName = sel.getLinkInfo(self.pageVersion) monthContainer.append(HTMLgen.Href(target, linkName)) for page, pageColours in self.pagesOverview.values(): if len(monthContainer.contents) > 0: page.prepend(HTMLgen.Heading(2, monthContainer, align = 'center')) graphs = pageToGraphs.get(page) page.prepend(HTMLgen.Heading(2, selContainer, align = 'center')) if minorVersionHeader.contents: if not graphs is None and len(graphs) > 1: page.prepend(HTMLgen.Heading(1, *graphs, align = 'center')) page.prepend(HTMLgen.Heading(1, minorVersionHeader, align = 'center')) creationDate = TitleWithDateStamp("").__str__().strip() page.prepend(HTMLgen.Paragraph(creationDate, align="center")) page.prepend(HTMLgen.Heading(1, self.getHeading(), align = 'center')) if 
len(pageColours) > 0: page.prepend(HTMLgen.BR()); page.prepend(HTMLgen.BR()); page.script = self.getFilterScripts(pageColours) self.writePages() def getFilterScripts(self, pageColours): finder = ColourFinder(self.getConfigValue) rowHeaderColour = finder.find("row_header_bg") successColour = finder.find("success_bg") # Always put green at the start, we often want to filter that sortedColours = sorted(pageColours, key=lambda c: (c != successColour, c)) scriptCode = "var TEST_ROW_HEADER_COLOR = " + repr(rowHeaderColour) + ";\n" + \ "var Colors = " + repr(sortedColours) + ";" return [ HTMLgen.Script(code=scriptCode), HTMLgen.Script(src="../javascript/jquery.js"), HTMLgen.Script(src="../javascript/filter.js"), HTMLgen.Script(src="../javascript/comment.js") ] def getHeading(self, versionToShow=""): heading = "Test results for " + self.pageTitle if versionToShow: heading += "." + versionToShow return heading def getTableHeader(self, version, repositoryDirs): return version if len(repositoryDirs) > 1 else "" def getExistingMonthPages(self): return glob(os.path.join(self.pageDir, "test_" + self.pageVersion + "_all_???[0-9][0-9][0-9][0-9].html")) def compareTags(self, x, y): timeCmp = cmp(self.getTagTimeInSeconds(x), self.getTagTimeInSeconds(y)) if timeCmp: return timeCmp elif len(x) != len(y): # If the timing is the same, sort alphabetically # Any number should be sorted numerically, do this by padding them with leading zeroes return cmp(plugins.padNumbersWithZeroes(x), plugins.padNumbersWithZeroes(y)) else: return cmp(x, y) def getTagFromFile(self, fileName): return os.path.basename(fileName).replace("teststate_", "") def findTestStateFilesAndTags(self, repositoryDirs): tagData, stateFiles, successFiles = {}, [], [] for _, dir in repositoryDirs: self.diag.info("Looking for teststate files in " + dir) for root, _, files in sorted(os.walk(dir)): for file in files: path = os.path.join(root, file) if file.startswith("teststate_"): tag = self.getTagFromFile(file) stateFiles.append((path, dir)) tagData.setdefault(tag, []).append(path) elif file.startswith("succeeded_"): successFiles.append((path, dir)) with open(path) as f: for line in f: parts = line.split() if parts: tag = parts[0] tagData.setdefault(tag, []).append(path) self.diag.info("Found " + str(len(stateFiles)) + " teststate files and " + str(len(successFiles)) + " success files in " + dir) return tagData, stateFiles, successFiles def processTestStateFile(self, stateFile, repository): state = self.readState(stateFile) testId = self.getTestIdentifier(stateFile, repository) extraVersion = self.findExtraVersion(repository) return testId, state, extraVersion def findExtraVersion(self, repository): versions = os.path.basename(repository).split(".") for i in xrange(len(versions)): version = ".".join(versions[i:]) if version in self.extraVersions: return version return "" @staticmethod def findGlobal(modName, className): try: exec "from " + modName + " import " + className + " as _class" except ImportError: exec "from texttestlib." + modName + " import " + className + " as _class" return _class #@UndefinedVariable @classmethod def getNewState(cls, file): # Would like to do load(file) here... but it doesn't work with universal line endings, see Python bug 1724366 from cStringIO import StringIO unpickler = Unpickler(StringIO(file.read())) # Magic to keep us backward compatible in the face of packages changing... 
unpickler.find_global = cls.findGlobal return unpickler.load() @classmethod def readState(cls, stateFile): file = open(stateFile, "rU") try: state = cls.getNewState(file) if isinstance(state, plugins.TestState): return state else: return cls.readErrorState("Incorrect type for state object.") except Exception, e: if os.path.getsize(stateFile) > 0: return cls.readErrorState("Stack info follows:\n" + str(e)) else: return plugins.Unrunnable("Results file was empty, probably the disk it resides on is full.", "Disk full?")
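# A minimal, self-contained sketch of the backward-compatible unpickling used
# by getNewState/findGlobal above: cPickle's Unpickler calls the find_global
# hook for every pickled class, so classes that have moved between packages
# can be remapped at load time. The remap dict and the "legacypkg" name are
# hypothetical, for illustration only.
from cStringIO import StringIO
from cPickle import Unpickler

def loadWithRemap(data, remap):
    # remap: {(moduleName, className): replacementClass}
    def findGlobalSketch(modName, className):
        if (modName, className) in remap:
            return remap[(modName, className)]
        mod = __import__(modName, {}, {}, [className])
        return getattr(mod, className)
    unpickler = Unpickler(StringIO(data))
    unpickler.find_global = findGlobalSketch  # consulted for every class in the stream
    return unpickler.load()

# e.g. loadWithRemap(open(stateFile, "rU").read(),
#                    {("legacypkg.plugins", "TestState"): plugins.TestState})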
def _process_default_flags(self, opts):
    # check whether a preprocessor (cpp or fpp) is available
    output = ''
    try:
        output = exec_cmd('which cpp', show_error_msg=False).strip()
    except Exception:
        pass
    if output.endswith('cpp'):
        self.bin['pp'] = output
    else:
        output = ''
        try:
            output = exec_cmd('which fpp', show_error_msg=False).strip()
        except Exception:
            pass
        if output.endswith('fpp'):
            self.bin['pp'] = output
        else:
            print 'ERROR: neither cpp nor fpp was found'
            sys.exit(-1)

    # parsing intrinsic skip option
    if opts.intrinsic:
        subflags = []
        for line in opts.intrinsic:
            subflags.extend(line.split(','))
        for subf in subflags:
            if subf and subf.find('=') > 0:
                key, value = subf.split('=')
                if key == 'except':
                    self._attrs['search']['except'].extend(value.split(';'))
                elif key == 'add_intrinsic':
                    Intrinsic_Procedures.extend([name.lower() for name in value.split(';')])
                else:
                    raise UserException('Unknown intrinsic sub option: %s' % subf)
            else:
                if subf == 'skip':
                    self._attrs['search']['skip_intrinsic'] = True
                elif subf == 'noskip':
                    self._attrs['search']['skip_intrinsic'] = False
                else:
                    raise UserException('Unknown intrinsic option(s) in %s' % subf)

    # parsing include parameters
    if opts.include:
        for inc in opts.include:
            inc_eq = inc.split('=')
            if len(inc_eq) == 1:
                for inc_colon in inc_eq[0].split(':'):
                    self._attrs['include']['path'].append(inc_colon)
            elif len(inc_eq) == 2:
                # TODO: support path for each file
                pass
            else:
                raise UserException('Wrong format include: %s' % inc)

    if opts.include_ini:
        process_include_option(opts.include_ini, self._attrs['include'])

    if opts.exclude_ini:
        process_exclude_option(opts.exclude_ini, self._attrs['exclude'])

    # parsing macro parameters
    if opts.macro:
        for line in opts.macro:
            for macro in line.split(','):
                macro_eq = macro.split('=')
                if len(macro_eq) == 1:
                    self._attrs['include']['macro'][macro_eq[0]] = '1'
                elif len(macro_eq) == 2:
                    self._attrs['include']['macro'][macro_eq[0]] = macro_eq[1]
                else:
                    raise UserException('Wrong format macro: %s' % macro)

    files = None
    if opts.source:
        for line in opts.source:
            flags = OrderedDict()
            for subflag in line.lower().split(','):
                if subflag.find('=') > 0:
                    key, value = subflag.split('=')
                    if key == 'file':
                        flags[key] = value.split(':')
                    elif key == 'alias':
                        p1, p2 = value.split(':')
                        if p1.endswith('/'):
                            p1 = p1[:-1]
                        if p2.endswith('/'):
                            p2 = p2[:-1]
                        self._attrs['source']['alias'][p1] = p2
                    else:
                        flags[key] = value
                else:
                    flags[subflag] = None
            isfree = None
            isstrict = None
            if flags.has_key('format'):
                if flags['format'] == 'free':
                    isfree = True
                elif flags['format'] == 'fixed':
                    isfree = False
                else:
                    raise UserException('format subflag of source flag should be either free or fixed.')
            if flags.has_key('strict'):
                if flags['strict'] == 'yes':
                    isstrict = True
                elif flags['strict'] == 'no':
                    isstrict = False
                else:
                    raise UserException('strict subflag of source flag should be either yes or no.')
            if flags.has_key('file'):
                subflags = OrderedDict()
                if isfree:
                    subflags['isfree'] = isfree
                if isstrict:
                    subflags['isstrict'] = isstrict
                for file in flags['file']:
                    abspath = os.path.abspath(file)
                    if files is None:
                        files = []
                    files.append(abspath)
                    self._attrs['source']['file'][abspath] = subflags
            else:
                if isfree:
                    self._attrs['source']['isfree'] = isfree
                if isstrict:
                    self._attrs['source']['isstrict'] = isstrict

    # duplicate paths for each alias
    if files is None:
        newpath = set()
        for path in self._attrs['include']['path']:
            newpath.add(path)
            for p1, p2 in self._attrs['source']['alias'].iteritems():
                if path.startswith(p1):
                    newpath.add(p2 + path[len(p1):])
                elif path.startswith(p2):
                    newpath.add(p1 + path[len(p2):])
        self._attrs['include']['path'] = list(newpath)

        newfile = OrderedDict()
        for path, value in self._attrs['include']['file'].iteritems():
            newfile[path] = value
            for p1, p2 in self._attrs['source']['alias'].iteritems():
                if path.startswith(p1):
                    newpath = p2 + path[len(p1):]
                    newfile[newpath] = deepcopy(value)
                elif path.startswith(p2):
                    newpath = p1 + path[len(p2):]
                    newfile[newpath] = deepcopy(value)
        self._attrs['include']['file'] = newfile

        for path, value in self._attrs['include']['file'].iteritems():
            if value.has_key('path'):
                newpath = set()
                for path in value['path']:
                    newpath.add(path)
                    for p1, p2 in self._attrs['source']['alias'].iteritems():
                        if path.startswith(p1):
                            newpath.add(p2 + path[len(p1):])
                        elif path.startswith(p2):
                            newpath.add(p1 + path[len(p2):])
                value['path'] = list(newpath)

    # parsing debugging options
    if opts.debug:
        for dbg in opts.debug:
            param_path, value = dbg.split('=')
            param_split = param_path.lower().split('.')
            value_split = value.lower().split(',')
            curdict = self._attrs['debug']
            for param in param_split[:-1]:
                curdict = curdict[param]
            curdict[param_split[-1]] = value_split

    # parsing logging options
    if opts.logging:
        for log in opts.logging:
            param_path, value = log.split('=')
            param_split = param_path.lower().split('.')
            value_split = value.lower().split(',')
            curdict = self._attrs['logging']
            for param in param_split[:-1]:
                curdict = curdict[param]
            curdict[param_split[-1]] = value_split

    # mpi frame code in kernel driver
    if opts.add_mpi_frame:
        self._attrs['add_mpi_frame']['enabled'] = True
        for checkparams in opts.add_mpi_frame.split(','):
            key, value = checkparams.split('=')
            key = key.lower()
            if key in ['np', 'mpiexec']:
                self._attrs['add_mpi_frame'][key] = value
            else:
                print 'WARNING: %s is not a supported add_mpi_frame parameter' % key

    if opts.outdir:
        self._attrs['path']['outdir'] = opts.outdir

    # create state directories and change working directory
    if not os.path.exists(self._attrs['path']['outdir']):
        os.makedirs(self._attrs['path']['outdir'])
    os.chdir(self._attrs['path']['outdir'])
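# A small, self-contained sketch of the alias expansion performed above: every
# include path is mirrored across each p1<->p2 alias pair, so lookups succeed
# whichever spelling of the source tree the build used. The function name is
# not part of the original code; it only illustrates the loop.
def expand_alias_paths(paths, aliases):
    # aliases: {p1: p2} directory prefixes, stored without trailing slashes
    newpath = set()
    for path in paths:
        newpath.add(path)
        for p1, p2 in aliases.iteritems():
            if path.startswith(p1):
                newpath.add(p2 + path[len(p1):])
            elif path.startswith(p2):
                newpath.add(p1 + path[len(p2):])
    return sorted(newpath)

# e.g. expand_alias_paths(['/src/lib'], {'/src': '/build/src'})
# -> ['/build/src/lib', '/src/lib']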
def SecondButtonPress(url, HostPage, page=None, elm="", elm2="", wform=0, addkey=None, removekey=None, cookies={}, wait=0, captchakey=None, captchaimg=None, captchacookies={}, split=None, GetUserAgent=None): domain = HostPage.split('/')[2] payload = OrderedDict() headers = OrderedDict() headers[ 'Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' headers['Accept-Charset'] = 'ISO-8859-1,utf-8;q=0.7,*;q=0.3' headers['Accept-Encoding'] = 'gzip,deflate,sdch' headers['Accept-Language'] = 'en-US,en;q=0.8' headers['Cache-Control'] = 'max-age=0' headers['Connection'] = 'keep-alive' headers['Referer'] = url if GetUserAgent == None: headers['User-Agent'] = UserAgent else: headers['User-Agent'] = GetUserAgent session = requests.session() requests.utils.add_dict_to_cookiejar(session.cookies, cookies) if page != None: s = page else: s = session.get(HostPage, headers=headers) try: form = HTML.ElementFromString(s.content) except: form = HTML.ElementFromString(s) try: whichform = form.xpath('//' + elm + 'form')[wform] if len(whichform.xpath('./' + elm2 + 'input')) != 0: for input in whichform.xpath('./' + elm2 + 'input'): if input.get('name') != None: key = input.get('name') value = input.get('value') if key != 'method_premium': if not payload.has_key(key): payload[key] = [value] else: payload[key].append(value) else: for input in form.xpath('//input'): if input.get('name') != None: key = input.get('name') value = input.get('value') if key != 'method_premium': if not payload.has_key(key): payload[key] = [value] else: payload[key].append(value) if captchakey != None: try: payload[captchakey] = GetImgValue(url=captchaimg, HostPage=HostPage, UserAgent=UserAgent, cookies=captchacookies, split=split) except: payload[captchakey] = "Processing Issue" if addkey != None: payload.update(addkey) if removekey != None: for key in removekey: try: del payload[key] except KeyError: pass Log(payload) if wait != 0: #wait required time.sleep(wait) headers['Content-Type'] = 'application/x-www-form-urlencoded' headers['Origin'] = 'http://' + domain headers['Referer'] = HostPage formaction = form.xpath('//' + elm + 'form')[wform].get('action') if formaction != None and formaction != "": if formaction.split('/')[0] == 'http:': HostPage = formaction elif len(formaction.split('/')) == 2: HostPage = 'http://' + HostPage.split('/')[2] + formaction elif len(formaction.split('/')) == 1: HostPage = HostPage.rpartition('/')[0] + '/' + formaction r = session.post(HostPage, data=payload, headers=headers, allow_redirects=True) r.raise_for_status() r.cookies = session.cookies return r except: s.cookies = session.cookies return s
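# A hedged sketch of the form-action resolution branches in SecondButtonPress
# above: an already-absolute action is used as-is, a root-relative action is
# joined to the host, and a bare filename replaces the last path component.
# Illustration only; like the original it assumes plain http URLs, and any
# other action shape leaves the page URL unchanged.
def resolveFormAction(hostPage, action):
    if not action:
        return hostPage
    if action.split('/')[0] == 'http:':
        return action                                           # absolute URL
    elif len(action.split('/')) == 2:
        return 'http://' + hostPage.split('/')[2] + action      # root-relative, e.g. '/post'
    elif len(action.split('/')) == 1:
        return hostPage.rpartition('/')[0] + '/' + action       # sibling file, e.g. 'submit.php'
    return hostPage

# e.g. resolveFormAction('http://example.com/dl/page', '/post') -> 'http://example.com/post'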
policy.set_action('permit') if re.search('dst', x[8]) != None: policy.set_nat('d') elif re.search('src', x[8]) != None: policy.set_nat('s') if re.search('log', x[8]) != None: policy.set_log() if re.search('count', x[8]) != None: policy.set_count() if re.search('no-session-backup', x[8]) != None: policy.set_nosb() if re.search('webauth', x[8]) != None: policy.set_webauth() policy.set_action(policy.action + "~") if policy_dict.has_key(policy.id): sys.stderr.write("Duplicate policy id entry detected: %s\n" % policy.id) continue policy_dict[policy.id] = policy continue # marks the beginning of a policy statement with extra configuration items # ['Id',None] x = policy_regex_begin.split(line)[1:-1] if len(x): id = x[0] if x[1] == "disable": policy_dict[id].set_disable() config_iter.next()
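# A minimal sketch of the parsing idiom above: re.split on a pattern with
# capturing groups returns the captured fields, and [1:-1] trims the (empty)
# text before and after a full-line match, leaving just the groups. The
# pattern below is hypothetical, standing in for policy_regex_begin.
import re

policy_regex_begin_sketch = re.compile(r'^set policy id (\d+)\s*(disable)?$')

def parse_policy_begin(line):
    x = policy_regex_begin_sketch.split(line.strip())[1:-1]
    return x if x else None  # [] on non-matching lines, matching the len(x) guard above

# e.g. parse_policy_begin('set policy id 20 disable') -> ['20', 'disable']
#      parse_policy_begin('set policy id 20')         -> ['20', None]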
class Atoms:
    "Class to deal with a single frame of an xyz movie"

    def __init__(self, filename=None, *allocargs, **allockwargs):
        self._atomsptr = None
        self.alloc(*allocargs, **allockwargs)
        if filename is not None:
            self.read(filename)

    def alloc(self, n=0, n_int=0, n_real=3, n_str=1, n_logical=0,
              use_libatoms=False, atomsptr=None, properties=None,
              lattice=numpy.array([[100., 0., 0.], [0., 100., 0.], [0., 0., 100.]]),
              params=ParamReader(), element='Si'):
        if use_libatoms or atomsptr is not None:
            if atomsptr is None:
                self.attach(libatoms.atoms_initialise(n, lattice))
            else:
                self.attach(atomsptr)
        else:
            self.n = n
            self.lattice = lattice
            self.g = numpy.linalg.inv(self.lattice)
            self.params = params
            # Create single property for atomic positions
            self.real = numpy.zeros((self.n, n_real), dtype=float)
            self.int = numpy.zeros((self.n, n_int), dtype=int)
            self.str = numpy.zeros((self.n, n_str), dtype='S10')
            self.logical = numpy.zeros((self.n, n_logical), dtype=bool)
            if properties is None:
                self.properties = OrderedDict({'species': ('S', slice(0, 1)),
                                               'pos': ('R', slice(0, 3))})
            else:
                self.properties = properties
            self.repoint()

    def attach(self, atomsptr):
        self.finalise()
        self._atomsptr = atomsptr
        self.n, n_int, n_real, n_str, n_logical, iloc, rloc, sloc, lloc, latticeloc, gloc = \
            libatoms.atoms_get_data(self._atomsptr)
        self.int = arraydata((self.n, n_int), int, iloc)
        self.real = arraydata((self.n, n_real), float, rloc)
        self.str = arraydata((self.n, n_str), 'S10', sloc)
        self.logical = arraydata((self.n, n_logical), bool, lloc)  # fixed: logical data lives at lloc, not sloc
        self.lattice = arraydata((3, 3), float, latticeloc)
        self.g = arraydata((3, 3), float, gloc)
        self.params = {}
        property_code_map = {1: 'I', 2: 'R', 3: 'S', 4: 'L'}
        self.properties = OrderedDict()
        for i in range(libatoms.atoms_n_properties(self._atomsptr)):
            key, (code, startcol, stopcol) = libatoms.atoms_nth_property(self._atomsptr, i + 1)
            self.properties[key.strip()] = (property_code_map[code], slice(startcol - 1, stopcol))
        self.repoint()

    def finalise(self):
        if self._atomsptr is not None:
            libatoms.atoms_finalise(self._atomsptr)
            self._atomsptr = None

    def __repr__(self):
        return 'Atoms(n=%d, properties=%s, params=%s, lattice=%s)' % \
            (self.n, repr(self.properties), repr(self.params), repr(self.lattice))

    def __cmp__(self, other):
        if other is None:
            return 1
        # Quick checks
        if (self.n != other.n) or (self.comment() != other.comment()):
            return 1
        # Check if arrays match one by one
        for this, that in \
            (self.lattice, other.lattice), \
            (self.real, other.real), (self.int, other.int), \
            (self.str, other.str), (self.logical, other.logical):
            if not numpy.all(this == that):
                return 1
        return 0

    def update(self, other):
        "Overwrite contents of this Atoms object with a copy of another"
        self.n = other.n
        self.lattice = other.lattice.copy()
        self.g = other.g.copy()
        self.params = other.params.copy()
        self.properties = other.properties.copy()
        self.real = other.real[:]
        self.int = other.int[:]
        self.str = other.str[:]
        self.logical = other.logical[:]
        self.repoint()

    def add_property(self, name, value, ncols=1):
        "Add a new property to this Atoms object. Value can be a scalar int or float, or an array."
# Scalar int or list of all ints if (type(value) == type(0)) or \ ((type(value) == type([])) and numpy.all(numpy.array(map(type,value)) == type(0))): n_int = self.int.shape[1] intcopy = self.int.copy() self.int = numpy.zeros((self.n, n_int + ncols), dtype=int) self.int[:, :n_int] = intcopy if ncols == 1: self.int[:, n_int] = value else: self.int[:, n_int:n_int + ncols] = value self.properties[name] = ('I', slice(n_int, n_int + ncols)) self.repoint() # Scalar real or list of all reals elif (type(value) == type(0.0)) or \ (type(value) == type([]) and numpy.all(numpy.array(map(type,value)) == type(0.0))): n_real = self.real.shape[1] realcopy = self.real.copy() self.real = numpy.zeros((self.n, n_real + ncols), dtype=float) self.real[:, :n_real] = realcopy if ncols == 1: self.real[:, n_real] = value else: self.real[:, n_real:n_real + ncols] = value self.properties[name] = ('R', slice(n_real, n_real + ncols)) self.repoint() # Scalar string or list of strings elif (type(value) == type('')) or \ ((type(value) == type([])) and numpy.all(numpy.array(map(type,value)) == type(''))): n_str = self.str.shape[1] strcopy = self.str.copy() self.str = numpy.zeros((self.n, n_str + ncols), dtype='S10') self.str[:, :n_str] = strcopy if ncols == 1: self.str[:, n_str] = value else: self.str[:, n_str:n_str + ncols] = value self.properties[name] = ('S', slice(n_str, n_str + ncols)) self.repoint() # Scalar logical or list of logicals elif (type(value) == type(False)) or \ ((type(value) == type([])) and numpy.all(numpy.array(map(type,value)) == type(False))): n_logical = self.logical.shape[1] logicalcopy = self.logical.copy() self.logical = numpy.zeros((self.n, n_logical + ncols), dtype=bool) self.logical[:, :n_logical] = logicalcopy if ncols == 1: self.logical[:, n_logical] = value else: self.logical[:, n_logical:n_logical + ncols] = value self.properties[name] = ('L', slice(n_logical, n_logical + ncols)) self.repoint() # Array type elif type(value) == type(numpy.array([])): if value.shape[0] != self.n: raise ValueError('length of value array (%d) != number of atoms (%d)' % \ (value.shape[0],self.n)) if value.dtype.kind == 'f': try: ncols = value.shape[1] except IndexError: ncols = 1 n_real = self.real.shape[1] realcopy = self.real.copy() self.real = numpy.zeros((self.n, n_real + ncols), dtype=float) self.real[:, :n_real] = realcopy if ncols == 1: self.real[:, n_real] = value.copy() else: self.real[:, n_real:n_real + ncols] = value.copy() self.properties[name] = ('R', slice(n_real, n_real + ncols)) self.repoint() elif value.dtype.kind == 'i': try: ncols = value.shape[1] except IndexError: ncols = 1 n_int = self.int.shape[1] intcopy = self.int.copy() self.int = numpy.zeros((self.n, n_int + ncols), dtype=int) self.int[:, :n_int] = intcopy if ncols == 1: self.int[:, n_int] = value.copy() else: self.int[:, n_int:n_int + ncols] = value.copy() self.properties[name] = ('I', slice(n_int, n_int + ncols)) self.repoint() elif value.dtype.kind == 'S': try: ncols = value.shape[1] except IndexError: ncols = 1 n_str = self.str.shape[1] strcopy = self.str.copy() self.str = numpy.zeros((self.n, n_str + ncols), dtype='S10') self.str[:, :n_str] = strcopy if ncols == 1: self.str[:, n_str] = value.copy() else: self.str[:, n_str:n_str + ncols] = value.copy() self.properties[name] = ('S', slice(n_str, n_str + ncols)) self.repoint() elif value.dtype == numpy.dtype('bool'): try: ncols = value.shape[1] except IndexError: ncols = 1 n_logical = self.logical.shape[1] logicalcopy = self.logical.copy() self.logical = numpy.zeros((self.n, 
n_logical + ncols), dtype=numpy.dtype('bool'))
                self.logical[:, :n_logical] = logicalcopy
                if ncols == 1:
                    self.logical[:, n_logical] = value.copy()
                else:
                    self.logical[:, n_logical:n_logical + ncols] = value.copy()
                # fixed: logical columns are registered as type 'L' (was mistakenly 'S')
                self.properties[name] = ('L', slice(n_logical, n_logical + ncols))
                self.repoint()
            else:
                raise ValueError("Don't know how to add array property of type %r" % value.dtype)
        else:
            raise ValueError("Don't know how to add property of type %r" % type(value))

    def repoint(self):
        "Make pointers to columns in real and int"
        for prop, (ptype, cols) in self.properties.items():
            if ptype == 'R':
                if cols.stop - cols.start == 1:
                    setattr(self, prop, self.real[:, cols.start])
                else:
                    setattr(self, prop, self.real[:, cols])
            elif ptype == 'I':
                if cols.stop - cols.start == 1:
                    setattr(self, prop, self.int[:, cols.start])
                else:
                    setattr(self, prop, self.int[:, cols])
            elif ptype == 'S':
                if cols.stop - cols.start == 1:
                    setattr(self, prop, self.str[:, cols.start])
                else:
                    setattr(self, prop, self.str[:, cols])
            elif ptype == 'L':
                if cols.stop - cols.start == 1:
                    setattr(self, prop, self.logical[:, cols.start])
                else:
                    setattr(self, prop, self.logical[:, cols])
            else:
                raise ValueError('Bad property type :' + str(self.properties[prop]))

    def comment(self, properties=None):
        "Return the comment line for this Atoms object"
        if properties is None:
            props = self.properties.keys()
        else:
            props = properties
        lattice_str = 'Lattice="' + ' '.join(map(str, numpy.reshape(self.lattice, 9))) + '"'
        props_str = 'Properties=' + ':'.join(map(':'.join,
            zip(props,
                [self.properties[k][0] for k in props],
                [str(self.properties[k][1].stop - self.properties[k][1].start) for k in props])))
        return lattice_str + ' ' + props_str + ' ' + str(self.params)

    def _props_dtype(self, props=None):
        "Return a record array dtype for the specified properties (default all)"
        if props is None:
            props = self.properties.keys()
        result = []
        fmt_map = {'R': 'd', 'I': 'i', 'S': 'S10', 'L': 'bool'}
        for prop in props:
            ptype, cols = self.properties[prop]
            if cols.start == cols.stop - 1:
                result.append((prop, fmt_map[ptype]))
            else:
                for c in range(cols.stop - cols.start):
                    result.append((prop + str(c), fmt_map[ptype]))
        return numpy.dtype(result)

    def to_recarray(self, props=None):
        "Return a record array containing the specified properties in order (defaults to all properties)"
        if props is None:
            props = self.properties.keys()
        # Create empty record array with correct dtype
        data = numpy.zeros(self.n, self._props_dtype(props))
        # Copy cols from self.real and self.int into data recarray
        for prop in props:
            ptype, cols = self.properties[prop]
            if ptype == 'R':
                if cols.start == cols.stop - 1:
                    data[prop] = self.real[:, cols.start]
                else:
                    for c in range(cols.stop - cols.start):
                        data[prop + str(c)] = self.real[:, cols.start + c]
            elif ptype == 'I':
                if cols.start == cols.stop - 1:
                    data[prop] = self.int[:, cols.start]
                else:
                    for c in range(cols.stop - cols.start):
                        data[prop + str(c)] = self.int[:, cols.start + c]
            elif ptype == 'S':
                if cols.start == cols.stop - 1:
                    data[prop] = self.str[:, cols.start]
                else:
                    for c in range(cols.stop - cols.start):
                        data[prop + str(c)] = self.str[:, cols.start + c]
            elif ptype == 'L':
                if cols.start == cols.stop - 1:
                    data[prop] = self.logical[:, cols.start]
                else:
                    for c in range(cols.stop - cols.start):
                        data[prop + str(c)] = self.logical[:, cols.start + c]
            else:
                raise ValueError('Bad property type :' + str(self.properties[prop][1]))
        return data

    def update_from_recarray(self, data, props=None):
        """Update Atoms data from a record array.
By default all properties are updated; use the props argument to update only a subset""" if props is None: props = self.properties.keys() if data.dtype != self._props_dtype(props) or data.shape != (self.n, ): raise ValueError('Data shape is incorrect') # Copy cols from data recarray into self.real and self.int for prop in props: ptype, cols = self.properties[prop] if ptype == 'R': if cols.start == cols.stop - 1: self.real[:, cols.start] = data[prop] else: for c in range(cols.stop - cols.start): self.real[:, cols.start + c] = data[prop + str(c)] elif ptype == 'I': if cols.start == cols.stop - 1: self.int[:, cols.start] = data[prop] else: for c in range(cols.stop - cols.start): self.int[:, cols.start + c] = data[prop + str(c)] elif ptype == 'S': if cols.start == cols.stop - 1: self.str[:, cols.start] = data[prop] else: for c in range(cols.stop - cols.start): self.str[:, cols.start + c] = data[prop + str(c)] elif ptype == 'L': if cols.start == cols.stop - 1: self.logical[:, cols.start] = data[prop] else: for c in range(cols.stop - cols.start): self.logical[:, cols.start + c] = data[prop + str(c)] else: raise ValueError('Bad property type :' + str(self.properties[prop][1])) def read_xyz(self, xyz): "Read from extended XYZ filename or open file." opened = False if type(xyz) == type(''): xyz = open(xyz, 'r') opened = True line = xyz.next() if not line: return False n = int(line.strip()) comment = (xyz.next()).strip() # Parse comment line params = ParamReader(comment) if not 'Properties' in params: raise ValueError('Properties missing from comment line') properties, n_int, n_real, n_str, n_logical = _parse_properties( params['Properties']) del params['Properties'] # Get lattice if not 'Lattice' in params: raise ValueError('No lattice found in xyz file') lattice = numpy.reshape(params['Lattice'], (3, 3)) del params['Lattice'] self.alloc(n=n,lattice=lattice,properties=properties,params=params,\ n_int=n_int,n_real=n_real,n_str=n_str,n_logical=n_logical) props_dtype = self._props_dtype() converters = [_getconv(props_dtype.fields[name][0]) \ for name in props_dtype.names] X = [] for i, line in enumerate(xyz): vals = line.split() row = tuple([converters[j](val) for j, val in enumerate(vals)]) X.append(row) if i == self.n - 1: break # Only read self.n lines try: data = numpy.array(X, props_dtype) except TypeError: raise IOError('End of file reached before end of frame') if opened: xyz.close() try: self.update_from_recarray(data) except ValueError: # got a partial frame, must be end of file return False else: return True def read_netcdf(self, fname, frame=0): from pupynere import netcdf_file nc = netcdf_file(fname) self.n = nc.dimensions['atom'] self.lattice = make_lattice(nc.variables['cell_lengths'][frame], nc.variables['cell_angles'][frame]) self.g = numpy.linalg.inv(self.lattice) self.params = OrderedDict() self.properties = OrderedDict() self.real = numpy.zeros((self.n, 0), dtype=float) self.int = numpy.zeros((self.n, 0), dtype=int) self.str = numpy.zeros((self.n, 0), dtype='S10') self.logical = numpy.zeros((self.n, 0), dtype=bool) vars = nc.variables.keys() vars = filter(lambda v: not v in ('cell_angles', 'cell_lengths'), vars) # ensure first var is species and second positions sp = vars.index('species') if sp != 0: vars[sp], vars[0] = vars[0], vars[sp] pos = vars.index('coordinates') if pos != 1: vars[pos], vars[1] = vars[1], vars[pos] for v in vars: d = nc.variables[v].dimensions if d[0] != 'frame': continue value = nc.variables[v][frame] if value.dtype == numpy.dtype('|S1'): value = 
[''.join(x).strip() for x in value] if len(d) == 1 or (len(d) == 2 and d[1] in ('label', 'string')): if (len(d) == 2 and d[1] in ('label', 'string')): value = ''.join(value) self.params[v] = value else: # Name mangling if v == 'coordinates': p = 'pos' elif v == 'velocities': p = 'velo' else: p = v value = nc.variables[v][frame] if value.dtype == numpy.dtype('|S1'): value = [''.join(x).strip() for x in value] self.add_property(p, value) def write_xyz(self, xyz=sys.stdout, properties=None): "Write atoms in extended XYZ format. xyz can be a filename or open file" if properties is None: # Sort by original order props = self.properties.keys() else: props = properties species = getattr(self, props[0]) if len(species.shape) != 1 or species.dtype.kind != 'S': raise ValueError('First property must be species like') pos = getattr(self, props[1]) if pos.shape[1] != 3 or pos.dtype.kind != 'f': raise ValueError('Second property must be position like') data = self.to_recarray(props) format = ''.join( [_getfmt(data.dtype.fields[name][0]) for name in data.dtype.names]) + '\n' opened = False if type(xyz) == type(''): xyz = open(xyz, 'w') opened = True xyz.write('%d\n' % self.n) xyz.write(self.comment(properties) + '\n') for i in range(self.n): xyz.write(format % tuple(data[i])) if opened: xyz.close() def read_cell(self, cell): "Read atoms from a CastepCell object or file" if hasattr(cell, 'next'): # looks like a file cell = castep.CastepCell(cell) self.update(cell.to_atoms()) def write_cell(self, fname): "Write Atoms to a cell file" cell = castep.CastepCell() cell.update_from_atoms(self) cell.write(fname) def read_geom(self, geom): "Read from a CASTEP .geom file" self.update(castep.read_geom(geom)) def read_castep(self, castepfile): "Read from a .castep output file" if self.n != 0: self.update( castep.read_castep_output(castepfile, self, abort=False)) else: self.update(castep.read_castep_output(castepfile, abort=False)) def read(self, fname, filetype=None): "Attempt to guess type of file from extension and call appropriate read method" opened = False if type(fname) == type(''): if fname.endswith('.gz'): import gzip fh = gzip.open(fname) fname = fname[:-3] # remove .gz elif fname.endswith('.nc'): fh = fname else: fh = open(fname, 'r') opened = True # Guess file type from extension if filetype is None: root, filetype = os.path.splitext(fname) filetype = filetype[1:] # remove '.' else: fh = fname # Default to xyz format if not filetype in ['cell', 'geom', 'xyz', 'castep', 'nc']: filetype = 'xyz' if filetype == 'xyz': self.read_xyz(fh) elif filetype == 'cell': self.read_cell(fh) elif filetype == 'geom': self.read_geom(fh) elif filetype == 'castep': self.read_castep(fh) elif filetype == 'nc': self.read_netcdf(fh) if opened: fh.close() def write(self, fname, filetype=None): opened = False if type(fname) == type(''): if fname.endswith('.gz'): import gzip fh = gzip.open(fname, 'w') fname = fname[:-3] # remove .gz else: fh = open(fname, 'w') # Guess file type from extension if filetype is None: root, filetype = os.path.splitext(fname) filetype = filetype[1:] # remove '.' opened = True else: fh = fname # Default to xyz format if not filetype in ['xyz', 'cfg', 'cell']: filetype = 'xyz' if filetype == 'xyz': self.write_xyz(fh) elif filetype == 'cfg': self.write_cfg(fh) elif filetype == 'cell': self.write_cell(fh) if opened: fh.close() def write_cfg(self, cfg=sys.stdout, shift=numpy.array([0., 0., 0.]), properties=None): """Write atoms in AtomEye extended CFG format. 
Returns a list of auxiliary properties actually written to CFG file, which may be abbreviated compared to those requested since AtomEye has a maximum of 32 aux props.""" opened = False if type(cfg) == type(''): cfg = open(cfg, 'w') opened = True if properties is None: properties = self.properties.keys() # Header line cfg.write('Number of particles = %d\n' % self.n) cfg.write('# ' + self.comment(properties) + '\n') # Lattice vectors for i in 0, 1, 2: for j in 0, 1, 2: cfg.write('H0(%d,%d) = %16.8f\n' % (i + 1, j + 1, self.lattice[i, j])) cfg.write('.NO_VELOCITY.\n') # Check first property is position-like species = getattr(self, properties[0]) if len(species.shape) != 1 or species.dtype.kind != 'S': raise ValueError('First property must be species like') pos = getattr(self, properties[1]) if pos.shape[1] != 3 or pos.dtype.kind != 'f': raise ValueError('Second property must be position like') if not self.properties.has_key('frac_pos'): self.add_property('frac_pos', 0.0, ncols=3) self.frac_pos[:] = numpy.array( [numpy.dot(pos[i, :], self.g) + shift for i in range(self.n)]) if not self.properties.has_key('mass'): self.add_property('mass', map(ElementMass.get, self.species)) properties = filter( lambda p: p not in ('pos', 'frac_pos', 'mass', 'species'), properties) # AtomEye can handle a maximum of 32 columns, so we might have to throw away # some of the less interesting propeeties def count_cols(): n_aux = 0 for p in properties: s = getattr(self, p).shape if len(s) == 1: n_aux += 1 else: n_aux += s[1] return n_aux boring_properties = ['travel', 'avgpos', 'oldpos', 'acc', 'velo'] while count_cols() > 32: if len(boring_properties) == 0: raise ValueError('No boring properties left!') try: next_most_boring = boring_properties.pop(0) del properties[properties.index(next_most_boring)] except IndexError: pass # this boring property isn't in the list: move on to next properties = ['species', 'mass', 'frac_pos'] + properties data = self.to_recarray(properties) cfg.write('entry_count = %d\n' % (len(data.dtype.names) - 2)) # 3 lines per atom: element name, mass and other data format = '%s\n%12.4f\n' for i, name in enumerate(data.dtype.names[2:]): if i > 2: cfg.write('auxiliary[%d] = %s\n' % (i - 3, name)) format = format + _getfmt(data.dtype.fields[name][0]) format = format + '\n' for i in range(self.n): cfg.write(format % tuple(data[i])) if opened: cfg.close() # Return column names as a list return list(data.dtype.names) def filter(self, mask): "Return smaller Atoms with only the elements where mask is true" other = Atoms() if mask is None: mask = numpy.zeros((self.n, ), numpy.bool) mask[:] = True other.n = count(mask) other.lattice = self.lattice.copy() other.g = self.g.copy() other.params = self.params.copy() other.properties = self.properties.copy() other.real = self.real[mask] other.int = self.int[mask] other.str = self.str[mask] other.logical = self.logical[mask] other.repoint() return other def copy(self): if self.n == 0: return Atoms() else: return self.filter(mask=None) def add(self, newpos, newspecies): if type(newpos) == type([]): newpos = numpy.array(newpos) if len(newpos.shape) == 1: n_new = 1 else: n_new = newpos.shape[0] oldn = self.n self.n = self.n + n_new self.real = numpy.resize(self.real, (self.n, self.real.shape[1])) self.int = numpy.resize(self.int, (self.n, self.int.shape[1])) self.str = numpy.resize(self.str, (self.n, self.str.shape[1])) self.logical = numpy.resize(self.logical, (self.n, self.logical.shape[1])) self.repoint() self.pos[oldn:self.n] = newpos 
self.species[oldn:self.n] = newspecies def remove(self, discard): keep = [i for i in range(self.n) if not i in discard] self.n = len(keep) self.real = self.real[keep] self.int = self.int[keep] self.str = self.str[keep] self.logical = self.logical[keep] self.repoint() def supercell(self, n1, n2, n3): other = Atoms(n=self.n*n1*n2*n3,n_int=self.int.shape[1],\ n_real=self.real.shape[1], \ properties=self.properties.copy()) other.lattice[0, :] = self.lattice[0, :] * n1 other.lattice[1, :] = self.lattice[1, :] * n2 other.lattice[2, :] = self.lattice[2, :] * n3 other.g = numpy.linalg.inv(other.lattice) for i in range(n1): for j in range(n2): for k in range(n3): p = numpy.dot(self.lattice, numpy.array([i, j, k])) for n in range(self.n): nn = ((i * n2 + j) * n3 + k) * self.n + n other.int[nn, :] = self.int[n, :] other.real[nn, :] = self.real[n, :] other.logical[nn, :] = self.logical[n, :] other.str[nn, :] = self.str[n, :] other.pos[nn, :] = self.pos[n, :] + p other.repoint() return other def cell_volume(self): return abs( numpy.dot(numpy.cross(self.lattice[0, :], self.lattice[1, :]), self.lattice[2, :]))
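# A short usage sketch of the Atoms class above: build a two-atom frame, add
# an extra per-atom column, and expand it into a 2x2x2 supercell. The values
# and the output filename are arbitrary illustration.
at = Atoms(n=2)
at.species[:] = ['Si', 'Si']
at.pos[:] = numpy.array([[0., 0., 0.], [1.36, 1.36, 1.36]])
at.add_property('charge', 0.0)   # appends a real-valued column, one entry per atom
big = at.supercell(2, 2, 2)      # 16 atoms; each lattice vector scaled along its axis
big.write_xyz('frame.xyz')       # extended XYZ with Lattice/Properties comment line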