def findProjectURL(cls, url):
    """Return the project directory URL that contains the given URL.

    Results are cached in cls.buffer_url_to_project_dir; stale entries
    (whose project folder no longer exists) are purged.  Otherwise the
    directory tree is walked upward from url looking for a folder named
    cls.classprefs.project_directory.

    @param url: url of a file or folder inside a prospective project
    @return: url of the project directory, or None if not found
    """
    # Check to see if we already know what the path is, and if we think we
    # do, make sure the project path still exists
    if url in cls.buffer_url_to_project_dir:
        path = cls.buffer_url_to_project_dir[url]
        if vfs.is_folder(path):
            return path
        del cls.buffer_url_to_project_dir[url]

    # Look for a new project path starting from the specified URL.
    if vfs.is_folder(url):
        last = vfs.normalize(url)
    else:
        last = vfs.normalize(vfs.get_dirname(url))
    cls.dprint(str(last.path))
    # Bug fix: dropped the redundant "and True" from the loop condition.
    # Walk upward until the path turns relative or stops changing.
    while not last.path.is_relative():
        path = last.resolve2("%s" % (cls.classprefs.project_directory))
        cls.dprint(path.path)
        if vfs.is_folder(path):
            cls.buffer_url_to_project_dir[url] = path
            return path
        path = vfs.get_dirname(path.resolve2(".."))
        if path == last:
            cls.dprint("Done!")
            break
        last = path
    return None
def export(cls, filename, cube, options=None, progress=None):
    """Write a cube's raw data and its companion header file.

    The interleave (bil/bip/bsq) is inferred from the filename extension
    when not supplied in options.

    @param filename: destination for the raw cube data
    @param cube: Cube instance to serialize
    @param options: optional dict of attributes applied to the header copy
    @param progress: optional progress callback passed to writeRawData
    """
    if options is None:
        options = dict()
    if 'interleave' not in options:
        # Guess the interleave from the file extension
        root, ext = os.path.splitext(filename)
        if ext:
            options['interleave'] = ext[1:].lower()
    url = vfs.normalize(filename)
    dprint("writing cube to %s" % url)
    fh = vfs.open_write(url)
    try:
        cube.writeRawData(fh, options, progress)
    finally:
        # Close the data file even if writing raises
        fh.close()
    headername = vfs.normalize(getCanonicalHeader(str(url)))
    dprint("writing header to %s" % headername)
    # Work on a copy so the caller's cube is not mutated
    c = copy.copy(cube)
    for k, v in options.iteritems():
        setattr(c, k, v)
    # The header describes a standalone data file, so all offsets are 0
    c.data_offset = 0
    c.header_offset = 0
    c.file_offset = 0
    # Bug fix: removed stray debugging statement "print c"
    h = Header()
    h.getCubeAttributes(c)
    hfh = vfs.open_write(headername)
    try:
        hfh.write(str(h))
    finally:
        hfh.close()
def findProjectURL(cls, url):
    """Return the project directory URL that contains the given URL.

    Cached results in cls.buffer_url_to_project_dir are validated and
    purged when stale.  Otherwise the tree is walked upward from url
    looking for a folder named cls.classprefs.project_directory.

    @param url: url of a file or folder inside a prospective project
    @return: url of the project directory, or None if not found
    """
    # Check to see if we already know what the path is, and if we think we
    # do, make sure the project path still exists
    if url in cls.buffer_url_to_project_dir:
        path = cls.buffer_url_to_project_dir[url]
        if vfs.is_folder(path):
            return path
        del cls.buffer_url_to_project_dir[url]

    # Look for a new project path starting from the specified URL.
    if vfs.is_folder(url):
        last = vfs.normalize(url)
    else:
        last = vfs.normalize(vfs.get_dirname(url))
    cls.dprint(str(last.path))
    # Bug fix: removed the redundant "and True" from the loop condition.
    while not last.path.is_relative():
        path = last.resolve2("%s" % (cls.classprefs.project_directory))
        cls.dprint(path.path)
        if vfs.is_folder(path):
            cls.buffer_url_to_project_dir[url] = path
            return path
        path = vfs.get_dirname(path.resolve2('..'))
        if path == last:
            cls.dprint("Done!")
            break
        last = path
    return None
def setDynamicChoices(self):
    """Collapse a freshly-typed path separator back to its scheme URL."""
    current = self.text.GetValue()
    # Only react when the user has just appended a single character to
    # the initial text and that character is a path separator.
    if current[:-1] == self.initial:
        typed_separator = current.endswith("/") or current.endswith(os.sep)
        if typed_separator:
            uri = vfs.normalize(current)
            replacement = str(vfs.normalize(uri.scheme + ":"))
            self.text.ChangeValue(replacement)
            self.text.SetInsertionPointEnd()
    CompletionMinibuffer.setDynamicChoices(self)
def setDynamicChoices(self):
    """Normalize the typed text to a scheme URL when a separator is added."""
    value = self.text.GetValue()
    # React only when exactly one separator character was appended to
    # the initial text.
    if value[:-1] == self.initial and value.endswith(('/', os.sep)):
        uri = vfs.normalize(value)
        self.text.ChangeValue(str(vfs.normalize(uri.scheme + ":")))
        self.text.SetInsertionPointEnd()
    CompletionMinibuffer.setDynamicChoices(self)
def setURL(self, url):
    """Record the buffer's URL, normalizing or canonicalizing as configured."""
    # raw_url stores the entire URL, including query string and fragments
    self.raw_url = vfs.normalize(url)
    if not url:
        # No URL given: fall back to a placeholder name
        self.url = vfs.normalize("untitled")
    elif self.canonicalize:
        self.url = vfs.canonical_reference(url)
    else:
        self.url = vfs.normalize(url)
    self.saveTimestamp()
    self.pending_url = None
def findHeaders(url):
    """Return existing header URLs for the given data file URL.

    Checks the full filename plus each header extension, then the
    filename with its own extension replaced by each header extension.
    """
    found = []
    base = str(url)
    # Full name plus header extension, e.g. "file.bip" -> "file.bip.hdr"
    for suffix in _header_extensions:
        candidate = vfs.normalize(base + suffix)
        if vfs.exists(candidate):
            found.append(candidate)
    # Extension replaced, e.g. "file.bip" -> "file.hdr"
    stem = os.path.splitext(base)[0]
    for suffix in _header_extensions:
        candidate = vfs.normalize(stem + suffix)
        if vfs.exists(candidate):
            found.append(candidate)
    return found
def findHeaders(url):
    """Return existing header URLs associated with a data file URL."""
    full_name = str(url)
    stem = os.path.splitext(full_name)[0]
    urls = []
    # Try "name.ext" first for every known header extension, then the
    # same extensions with the original extension stripped.
    for prefix in (full_name, stem):
        for ext in _header_extensions:
            header = vfs.normalize(prefix + ext)
            if vfs.exists(header):
                urls.append(header)
    return urls
def createProject(cls, topdir):
    """Create a project control directory under topdir and register it.

    Any currently-open buffers whose URLs live beneath topdir get the
    new project info attached to their viewers.

    @raise TypeError: if the project already exists, or the project
    directory name is taken by a regular file
    """
    url = vfs.normalize(topdir)
    if url in cls.url_to_project_mapping:
        raise TypeError("Project already exists.")
    proj_dir = url.resolve2(cls.classprefs.project_directory)
    if not vfs.is_folder(proj_dir):
        if vfs.exists(proj_dir):
            # A plain file is squatting on the project directory name
            raise TypeError(
                "Can't create directory %s -- seems already exist as a file" % proj_dir)
        vfs.make_folder(proj_dir)
    info = cls.registerProject(None, proj_dir)
    info.savePrefs()
    cls.dprint(info)
    # Attach the project to open buffers located under topdir
    for buffer in BufferList.getBuffers():
        if buffer.url.scheme != "file":
            continue
        cls.dprint(u"prefix=%s topdir=%s" % (buffer.url.path.get_prefix(url.path), url.path))
        if buffer.url.path.get_prefix(url.path) == url.path:
            cls.dprint(u"belongs in project! %s" % buffer.url.path)
            for mode in buffer.iterViewers():
                mode.project_info = info
        else:
            cls.dprint(u"not in project: %s" % buffer.url.path)
    return info
def createProject(cls, topdir):
    """Create and register a project rooted at topdir.

    Viewers of open buffers beneath topdir are pointed at the new
    project info.

    @raise TypeError: duplicate project, or a file occupying the
    project directory name
    """
    url = vfs.normalize(topdir)
    if url in cls.url_to_project_mapping:
        raise TypeError("Project already exists.")
    proj_dir = url.resolve2(cls.classprefs.project_directory)
    if not vfs.is_folder(proj_dir):
        if not vfs.exists(proj_dir):
            vfs.make_folder(proj_dir)
        else:
            raise TypeError("Can't create directory %s -- seems already exist as a file" % proj_dir)
    info = cls.registerProject(None, proj_dir)
    info.savePrefs()
    cls.dprint(info)
    for buf in BufferList.getBuffers():
        # Only local files can belong to a project
        if buf.url.scheme != "file":
            continue
        cls.dprint(u"prefix=%s topdir=%s" % (buf.url.path.get_prefix(url.path), url.path))
        if buf.url.path.get_prefix(url.path) == url.path:
            cls.dprint(u"belongs in project! %s" % buf.url.path)
            for viewer in buf.iterViewers():
                viewer.project_info = info
        else:
            cls.dprint(u"not in project: %s" % buf.url.path)
    return info
def __init__(self, filename=None, **kwargs):
    """Set up the ENVI header conversion tables and optionally load data.

    @param filename: either a Cube instance (whose attributes populate
    the header) or a pathname/URL resolved to a header/cube file pair
    @keyword debug: enable debug output (defaults to False)
    """
    if 'debug' in kwargs:
        self.debug = kwargs['debug']
    else:
        self.debug = False
    # Placeholder defaults -- overwritten when a real header is parsed.
    # NOTE(review): "98" looks like an arbitrary sentinel; confirm.
    self['samples'] = "98"
    self['lines'] = "98"
    self['bands'] = "98"
    self['interleave'] = "bil"
    # Keys whose values are treated as free-form strings
    self.strings = ['description']
    # Keys whose values are parsed as lists
    self.lists = [
        'wavelength', 'fwhm', 'sigma', 'band names', 'default bands',
        'bbl', 'map info', 'spectra names'
    ]
    # Keys written first, in this order, when serializing the header
    self.outputorder = [
        'description', 'samples', 'lines', 'bands', 'byte order',
        'interleave'
    ]
    # To convert from python dict to object attributes, here's a
    # list of conversion functions that relate to a list of items
    # in the header file.
    self.convert = (
        (int, [
            'samples', 'lines', 'bands', 'byte order', 'bbl', 'x start',
            'header offset'
        ]),
        (float, ['wavelength', 'fwhm', 'sigma', 'reflectance scale factor']),
        (lambda s: s.lower(), ['interleave', 'sensor type']),
        (normalizeUnits, ['wavelength units']),
        (lambda s: enviDataType[int(s)], ['data type']),
        (lambda s: s, ['description', 'default bands']),
        (lambda s: s.strip(), ['band names', 'spectra names']),
    )
    # convert from the object attributes to the ENVI text format
    self.unconvert = ((lambda s: enviDataType.index(s), ['data type']), )
    # if attributes are specified here, it will convert the ENVI
    # header key to the attribute name used in the Cube attribute
    # list. Other ENVI keys will be converted to lower case and
    # have spaces replaced by underscores.
    self.attributeConvert = {
        'reflectance scale factor': 'scale_factor',
        'wavelength': 'wavelengths'
    }
    if filename:
        if isinstance(filename, Cube):
            # A Cube instance was passed directly: mirror its attributes
            self.getCubeAttributes(filename)
        else:
            filename = vfs.normalize(filename)
            self.headerurl, self.cubeurl = self.getFilePair(filename)
            self.open(self.headerurl)
    else:
        self.headerurl = None
        self.cubeurl = None
def cwd(self, use_vfs=False):
    """Find the current working directory of the buffer.

    Can be used in two ways based on use_vfs:

    use_vfs == True: uses the vfs to return the directory in the same
    scheme as the buffer

    use_vfs == False (the default): find the current working directory
    on the local filesystem.  Some schemes, like tar, for instance, are
    overlays on the current filesystem and the cwd of those schemes
    with this sense of use_vfs will report the overlayed directory.
    """
    # Prefer the pending Save As target when one has been set
    url = self.pending_url if self.pending_url is not None else self.url
    if use_vfs:
        if vfs.is_folder(url):
            return vfs.normalize(url)
        return vfs.get_dirname(url)
    path = self._cwd(url)
    if (not path or path == '/') and self.created_from_url:
        # Fall back to the directory the buffer was created from
        path = self._cwd(self.created_from_url)
    if path == '/':
        # Last resort: the user's documents directory
        path = wx.StandardPaths.Get().GetDocumentsDir()
    return path
def _cwd(self, url, use_vfs=False):
    """Find a directory in the local filesystem that corresponds to the
    given url.

    NOTE(review): the use_vfs parameter is unused in this body --
    confirm whether any caller relies on it.

    @param url: vfs url to map onto the local filesystem
    @return: local directory path, or '/' when no mapping is found
    """
    if url.scheme == 'file':
        # Local file: its containing directory is the answer
        path = os.path.normpath(os.path.dirname(unicode(url.path)))
    else:
        # If it's an absolute path, see if it converts to an existing path
        # in the local filesystem by converting it to a file:// url and
        # seeing if any path components exist
        lastpath = None
        temp = unicode(url.path)
        # Absolute path may be indicated by a drive letter and a colon
        # on windows
        if temp.startswith('/') or (len(temp) > 2 and temp[1] == ':'):
            uri = vfs.normalize(unicode(url.path))
            path = os.path.normpath(unicode(uri.path))
            # Walk upward until an existing directory is found or the
            # walk stops making progress
            while path != lastpath and path != '/':
                dprint("trying %s" % path)
                if os.path.isdir(path):
                    break
                lastpath = path
                path = os.path.dirname(path)
        else:
            path = '/'
    return path
def setPendingSaveAsURL(self, url):
    """Remember url as the default target for future Save As requests.

    Subsequent Save As dialogs will default to this URL instead of the
    URL the file was created from.
    """
    normalized = vfs.normalize(url)
    self.pending_url = normalized
def save(self, url=None):
    """Save the buffer contents, optionally under a new URL.

    When saving to a different URL than the buffer's current one, the
    original file's permissions are copied to the new file (best
    effort) and the buffer is re-pointed at the new URL.

    @param url: destination url; None means save in place
    @raise IOError: re-raised after printing an error if writing fails
    """
    assert self.dprint(u"Buffer: saving buffer %s as %s" % (self.url, url))
    try:
        if url is None:
            saveas = self.url
        else:
            saveas = vfs.normalize(url)
        self.stc.prepareEncoding()
        fh = self.stc.openFileForWriting(saveas)
        self.stc.writeTo(fh, saveas)
        self.stc.closeFileAfterWriting(fh)
        self.stc.SetSavePoint()
        self.removeAutosaveIfExists()
        if saveas != self.url:
            # Saving under a new name: carry over the old permissions
            try:
                permissions = vfs.get_permissions(self.url)
                vfs.set_permissions(saveas, permissions)
            except OSError:
                # The original file may have been deleted, in which case
                # the permissions setting will fail.
                pass
            self.setURL(saveas)
            self.setName()
        self.readonly = not vfs.can_write(saveas)
        pub.sendMessage('buffer.opened', buffer=self)
        self.setInitialStateIsUnmodified()
        self.showModifiedAll()
        self.saveTimestamp()
    except IOError, e:
        eprint(u"Failed writing to %s: %s" % (self.url, e))
        raise
def isURL(self, url):
    """Return True if url refers to the same resource as this buffer."""
    if self.canonicalize:
        reference = vfs.canonical_reference(url)
    else:
        reference = vfs.normalize(url)
    # Compare against the buffer's stored URL directly
    return reference == self.url
def initialActivation(self):
    """Load saved macro data from the configured macro file.

    Failures are logged with a traceback but never propagate, so a
    corrupt macro file cannot prevent startup.
    """
    pathname = wx.GetApp().getConfigFilePath(self.classprefs.macro_file)
    macro_url = vfs.normalize(pathname)
    try:
        MacroSaveData.load(macro_url)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.  Bug fix: message said "to" but this loads
        # *from* the macro file.
        dprint("Failed loading macro data from %s" % macro_url)
        import traceback
        traceback.print_exc()
def isTitleBufferOnly(self):
    """Return True when the only open major mode is the title page."""
    if len(self.getAllMajorModes()) > 1:
        return False
    mode = self.getActiveMajorMode()
    title_url = vfs.normalize(wx.GetApp().classprefs.title_page)
    dprint(u"%s == %s => %s" % (title_url, mode.buffer.url, mode.buffer.url == title_url))
    return mode.buffer.url == title_url
def requestedShutdown(self):
    """Persist macro data to the configured macro file at shutdown.

    Failures are logged with a traceback but never propagate so that
    shutdown can continue.
    """
    pathname = wx.GetApp().getConfigFilePath(self.classprefs.macro_file)
    macro_url = vfs.normalize(pathname)
    try:
        MacroSaveData.save(macro_url)
    except Exception:
        # Narrowed from a bare except (KeyboardInterrupt/SystemExit now
        # propagate); also dropped the redundant trailing "pass".
        dprint("Failed saving macro data to %s" % macro_url)
        import traceback
        traceback.print_exc()
def findGlobalTemplate(cls, mode, url):
    """Find the global template that belongs to the particular major mode.

    @param mode: major mode instance
    @param url: url of file that is being created
    @return: template contents, or None when no template file is found
    """
    template_dir = wx.GetApp().config.fullpath(cls.classprefs.template_directory)
    template_url = vfs.normalize(template_dir)
    fh = cls.getConfigFileHandle(template_url, mode, url)
    if not fh:
        # No template available for this mode
        return None
    return fh.read()
def getSelectedMacros(self):
    """Return a list of all the selected macros.

    @returns: a list containing the URL of each selected macro
    """
    macro_urls = []
    for item in self.GetSelections():
        stored_path = self.GetPyData(item)
        if stored_path is None:
            # Skip tree items that don't carry a macro path
            continue
        macro_urls.append(vfs.normalize("macro:%s" % stored_path))
    return macro_urls
def identify(cls, url):
    """Return a metadata handler capable of reading the cube at url."""
    fh = vfs.open(url)
    assert cls.dprint("checking for cube handler: %s" % dir(fh))
    # A vfs plugin may already supply a cube-aware metadata object
    if fh and hasattr(fh, 'metadata') and hasattr(fh.metadata, 'getCube'):
        return fh.metadata
    matches = cls.identifyall(vfs.normalize(url))
    if matches:
        return matches[0]
    return None
def completeScheme(self, text, uri, path):
    """Return completions of text against the registered URL schemes."""
    # No scheme has been typed yet, so offer every known vfs scheme
    # whose name starts with the current text.
    completions = []
    for scheme in vfs.get_file_system_schemes():
        if not scheme.startswith(text):
            self.dprint("skipping %s because it doesn't start with %s" % (scheme, text))
            continue
        completions.append(str(vfs.normalize(scheme + ":")))
    return completions
def identify(cls, url):
    """Locate a handler that understands the cube stored at url."""
    fh = vfs.open(url)
    assert cls.dprint("checking for cube handler: %s" % dir(fh))
    if fh and hasattr(fh, 'metadata') and hasattr(fh.metadata, 'getCube'):
        # The filesystem handler itself knows how to produce a cube
        return fh.metadata
    url = vfs.normalize(url)
    candidates = cls.identifyall(url)
    return candidates[0] if candidates else None
def save(self, filename=None):
    """Serialize this object to filename; do nothing when no name is given."""
    if not filename:
        return
    url = vfs.normalize(filename)
    # Reuse the existing file when present, otherwise create a new one
    if vfs.is_file(url):
        fh = vfs.open(url, vfs.WRITE)
    else:
        fh = vfs.make_file(url)
    if fh:
        fh.write(str(self))
        fh.close()
    else:
        eprint("Couldn't open %s for writing.\n" % filename)
def save(self, filename=None):
    """Write the string form of this object out to filename."""
    if filename:
        url = vfs.normalize(filename)
        # Open for overwrite when the file exists, else create it
        fh = vfs.open(url, vfs.WRITE) if vfs.is_file(url) else vfs.make_file(url)
        if not fh:
            eprint("Couldn't open %s for writing.\n" % filename)
            return
        fh.write(str(self))
        fh.close()
def __init__(self, filename=None, **kwargs):
    """Set up the ENVI header conversion tables and optionally load data.

    @param filename: either a Cube instance (whose attributes populate
    the header) or a pathname/URL resolved to a header/cube file pair
    @keyword debug: enable debug output (defaults to False)
    """
    if 'debug' in kwargs:
        self.debug = kwargs['debug']
    else:
        self.debug = False
    # Placeholder defaults -- overwritten when a real header is parsed.
    # NOTE(review): "98" looks like an arbitrary sentinel; confirm.
    self['samples'] = "98"
    self['lines'] = "98"
    self['bands'] = "98"
    self['interleave'] = "bil"
    # Keys whose values are treated as free-form strings
    self.strings = ['description']
    # Keys whose values are parsed as lists
    self.lists = ['wavelength', 'fwhm', 'sigma', 'band names',
                  'default bands', 'bbl', 'map info', 'spectra names']
    # Keys written first, in this order, when serializing the header
    self.outputorder = ['description', 'samples', 'lines', 'bands',
                        'byte order', 'interleave']
    # To convert from python dict to object attributes, here's a
    # list of conversion functions that relate to a list of items
    # in the header file.
    self.convert = (
        (int, ['samples', 'lines', 'bands', 'byte order', 'bbl',
               'x start', 'header offset']),
        (float, ['wavelength', 'fwhm', 'sigma', 'reflectance scale factor']),
        (lambda s: s.lower(), ['interleave', 'sensor type']),
        (normalizeUnits, ['wavelength units']),
        (lambda s: enviDataType[int(s)], ['data type']),
        (lambda s: s, ['description', 'default bands']),
        (lambda s: s.strip(), ['band names', 'spectra names']),
    )
    # convert from the object attributes to the ENVI text format
    self.unconvert = ((lambda s: enviDataType.index(s), ['data type']),)
    # if attributes are specified here, it will convert the ENVI
    # header key to the attribute name used in the Cube attribute
    # list. Other ENVI keys will be converted to lower case and
    # have spaces replaced by underscores.
    self.attributeConvert = {'reflectance scale factor': 'scale_factor',
                             'wavelength': 'wavelengths'}
    if filename:
        if isinstance(filename, Cube):
            # A Cube instance was passed directly: mirror its attributes
            self.getCubeAttributes(filename)
        else:
            filename = vfs.normalize(filename)
            self.headerurl, self.cubeurl = self.getFilePair(filename)
            self.open(self.headerurl)
    else:
        self.headerurl = None
        self.cubeurl = None
def showInitialPosition(self, url, options=None):
    """Apply the fragment and query parts of url to this view.

    The fragment is treated as a pending Save As location; recognized
    query parameters ("search" and "type") preload the search controls.
    """
    if url.fragment:
        # The fragment names the file the buffer should save back to
        newurl = vfs.normalize(url.fragment)
        self.buffer.setPendingSaveAsURL(newurl)
    if url.query:
        for name, value in url.query.iteritems():
            if name == "search":
                self.search_text.SetValue(value)
            elif name == "type":
                # Select the search type by its label, then propagate
                # the resulting index into the search-type setting
                self.options.SetStringSelection(value)
                sel = self.options.GetSelection()
                self.buffer.stc.search_type.setIndex(sel)
    self.buffer.stc.update(url)
    # Defer the list refresh until after the current event completes
    wx.CallAfter(self.resetList)
def makeTabActive(self, url, options=None):
    """Make the tab current that corresponds to the url.

    If the url isn't found, nothing happens.

    @return: True if URL was found, False if not.
    """
    normalized = vfs.normalize(url)
    self.dprint("url=%s normalized=%s" % (url, normalized))
    found_mode = self.tabs.moveSelectionToURL(normalized)
    if found_mode:
        found_mode.showInitialPosition(normalized)
        if options:
            found_mode.setViewPositionData(options)
    return found_mode is not None
def getDatasetPath(self, name):
    """Build a full dataset: pathname rooted at the buffer's directory.

    Prefixing the dataset name with the source image's directory means
    any save command starts from the same path shown for the source
    image in the file save dialog.
    """
    import peppy.vfs as vfs
    cwd = self.mode.buffer.cwd()
    cwd_url = vfs.normalize(cwd)
    return u"dataset:%s/%s" % (unicode(cwd_url.path), name)
def complete(self, text):
    """Return sorted completion candidates for the partially-typed text.

    A ':' in the text means a scheme has been chosen, so path
    completion is used; otherwise scheme names are completed.
    Completion failures yield an empty list.
    """
    uri = vfs.normalize(text)
    path = str(uri.path)
    self.dprint("uri=%s text=%s path=%s" % (str(uri), text, path))
    try:
        if ':' in text:
            paths = self.completePath(text, uri, path)
        else:
            paths = self.completeScheme(text, uri, path)
    except Exception:
        # Narrowed from a bare except: completion failures are
        # non-fatal, but KeyboardInterrupt must not be swallowed.
        if self.debuglevel > 0:
            import traceback
            error = traceback.format_exc()
            dprint(error)
        paths = []
    paths.sort()
    return paths
def complete(self, text):
    """Return sorted completion candidates for the partially-typed text.

    Text containing ':' is completed as a path within a scheme;
    otherwise the scheme name itself is completed.  Errors during
    completion produce an empty list.
    """
    uri = vfs.normalize(text)
    path = str(uri.path)
    self.dprint("uri=%s text=%s path=%s" % (str(uri), text, path))
    try:
        if ":" in text:
            paths = self.completePath(text, uri, path)
        else:
            paths = self.completeScheme(text, uri, path)
    except Exception:
        # Narrowed from a bare except so interrupts still propagate;
        # any other completion error is logged (when debugging) and
        # treated as "no completions".
        if self.debuglevel > 0:
            import traceback
            error = traceback.format_exc()
            dprint(error)
        paths = []
    paths.sort()
    return paths
def load(cls, url, bad=None, progress=None):
    """Find an HSI dataset instance corresponding to the url

    @param url: url to load

    @param bad: subclass of HSI.MetadataMixin that should be avoided.
    This is used to select a different dataset reader if an error
    occurs with the one specified here.

    @param progress: (optional) progress bar callback

    @return: instance of HSI.MetadataMixin that can read the file, or
    None if nothing is found.
    """
    cls.discover()
    url = vfs.normalize(url)
    if bad:
        cls.dprint("EXCLUDING %s" % bad)
    # Check to see if there's a specific handler provided in the vfs
    fh = vfs.open(url)
    cls.dprint("checking for cube handler: %s" % dir(fh))
    if fh and hasattr(fh, 'metadata') and hasattr(fh.metadata, 'getCube'):
        dataset = fh.metadata
        # Only return the dataset if it's not the same class we're trying
        # to avoid
        if dataset.__class__ != bad:
            return dataset
    # OK, that didn't return a result, so see if there's a HSI handler.
    # The first acceptable (non-"bad") format wins.
    matches = cls.identifyall(url)
    for format in matches:
        if format == bad:
            cls.dprint("Skipping format %s" % format.format_name)
            continue
        cls.dprint("Loading %s format cube" % format.format_name)
        dataset = format(url, progress=progress)
        return dataset
    return None
def addMacro(cls, macro, dirname=None):
    """Register a macro in the macro: filesystem, optionally under a
    subdirectory.

    If dirname collides with an existing macro *file* of the same name,
    that file is renamed out of the way before the folder is created.
    The macro itself is stored under a name guaranteed not to clash
    with existing entries (via findAlternateName).
    """
    if dirname:
        if not dirname.endswith("/"):
            dirname += "/"
        # Make sure the directory exists
        url = vfs.normalize("macro:%s" % dirname)
        needs_mkdir = False
        if vfs.exists(url):
            if vfs.is_file(url):
                # we have a macro that is the same name as the directory
                # name. Rename the file and create the directory.
                components = dirname.strip('/').split('/')
                filename = components.pop()
                parent_dirname = "/".join(components)
                dum, new_filename = cls.findAlternateName(parent_dirname, filename)
                #dprint("parent=%s filename=%s: New filename: %s" % (parent_dirname, filename, new_filename))
                parent, existing, name = cls._find(parent_dirname)
                #dprint("existing=%s" % existing)
                existing[new_filename] = existing[filename]
                del existing[filename]
                #dprint("existing after=%s" % existing)
                needs_mkdir = True
        else:
            needs_mkdir = True
        if needs_mkdir:
            #dprint("Making folder %s" % url)
            vfs.make_folder(url)
    else:
        dirname = ""
    # Pick a non-clashing name and store the macro under it
    fullpath, basename = cls.findAlternateName(dirname, macro.name)
    parent, existing, name = cls._find(dirname)
    #dprint("name=%s: parent=%s, existing=%s" % (basename, parent, existing))
    macro.setName(fullpath)
    existing[basename] = macro
def staticAnalysis(self, lang="fortran", regenerate=False): url = self.getSettingsRelativeURL("%s.static_analysis" % lang) dprint(url) filename = str(url.path) dprint(filename) if lang == "fortran": stats = FortranStaticAnalysis(serialized_filename=filename, regenerate=regenerate) if not regenerate and not stats.isEmpty(): return url for source in self.walkProjectDir(["*.f", "*.f90"]): print source stats.scan(source) stats.analyze() stats.summary() dprint(filename) stats.saveStateToFile() return url else: dprint("%s not supported for static analysis") return None if __name__ == "__main__": app = wx.PySimpleApp() ctags = ProjectInfo( vfs.normalize("/home/rob/src/peppy-git/.peppy-project")) print ctags.getTag('GetValue') ctags.regenerateTags()
def setURL(self, url=None):
    """Store the normalized url, or None when no url is given."""
    self.url = vfs.normalize(url) if url else None
def testHeader2(self):
    """The first header found for test2.bip must be test2.bip.hdr."""
    data_file = localfile('hsi/test2.bip')
    found = ENVI.findHeaders(data_file)
    expected = vfs.normalize(localfile('hsi/test2.bip.hdr'))
    eq_(found[0], expected)
# NOTE(review): this chunk holds class-body members (attributes and two
# methods) of a popup-list action, followed by a module-level test
# stanza; the enclosing class header is not visible from here.
name = "Same Project"
inline = False

def getItems(self):
    """Collect open buffers that belong to the same project as the
    current mode and return their display names."""
    wrapper = self.popup_options['wrapper']
    tab_mode = wrapper.editwin
    self.savelist = []
    if self.mode.project_info:
        known_url = self.mode.project_info.project_settings_dir
        #dprint("known=%s" % (known_url))
        for buf in BufferList.storage:
            url = ProjectPlugin.findProjectURL(buf.url)
            #dprint("buf.url=%s, project url=%s" % (buf.url, url))
            if url == known_url:
                self.savelist.append(buf)
    return [buf.displayname for buf in self.savelist]

def action(self, index=-1, multiplier=1):
    """Switch the current tab to the buffer chosen from the popup."""
    assert self.dprint("top window to %d: %s" % (index, self.savelist[index]))
    wrapper = self.popup_options['wrapper']
    self.frame.setBuffer(self.savelist[index], wrapper)

if __name__ == "__main__":
    app = wx.PySimpleApp()
    ctags = ProjectInfo(vfs.normalize("/home/rob/src/peppy-git/.peppy-project"))
    print ctags.getTag('GetValue')
    ctags.regenerateTags()
def stop(self): if self.process: self.process.kill() def staticAnalysis(self, lang="fortran", regenerate=False): url = self.getSettingsRelativeURL("%s.static_analysis" % lang) dprint(url) filename = str(url.path) dprint(filename) if lang == "fortran": stats = FortranStaticAnalysis(serialized_filename=filename, regenerate=regenerate) if not regenerate and not stats.isEmpty(): return url for source in self.walkProjectDir(["*.f", "*.f90"]): print source stats.scan(source) stats.analyze() stats.summary() dprint(filename) stats.saveStateToFile() return url else: dprint("%s not supported for static analysis") return None if __name__== "__main__": app = wx.PySimpleApp() ctags = ProjectInfo(vfs.normalize("/home/rob/src/peppy-git/.peppy-project")) print ctags.getTag('GetValue') ctags.regenerateTags()
def __init__(self, stc):
    """Remember the text control and assign a placeholder URL."""
    placeholder = vfs.normalize("nothing")
    self.stc = stc
    self.url = placeholder