def lint_file(path, kind):
    """Lint a script file and return its cached _Script entry.

    Results are memoized in the module-level ``lint_cache`` keyed by the
    normalized path, so each file is linted at most once.

    Args:
        path: path to the file to lint.
        kind: 'js' for a plain script, 'html' for a file whose <script>
            tags (inline and external) should be linted.

    Returns:
        The _Script object stored in ``lint_cache`` for this file.
    """
    def import_script(import_path):
        # The user can specify paths using backslashes (such as when
        # linting Windows scripts on a posix environment).
        import_path = import_path.replace('\\', os.sep)
        import_path = os.path.join(os.path.dirname(path), import_path)
        return lint_file(import_path, 'js')

    def _lint_error(*args):
        # Bind the normalized path so callers only supply the details.
        return lint_error(normpath, *args)

    normpath = util.normpath(path)
    if normpath in lint_cache:
        return lint_cache[normpath]
    # Removed a leftover debug `print normpath` statement here.
    contents = util.readfile(path)
    # Insert the cache entry before linting so recursive imports that
    # cycle back to this file find it instead of recursing forever.
    lint_cache[normpath] = _Script()

    script_parts = []
    if kind == 'js':
        script_parts.append((None, contents))
    elif kind == 'html':
        for script in _findhtmlscripts(contents):
            if script['type'] == 'external':
                other = import_script(script['src'])
                lint_cache[normpath].importscript(other)
            elif script['type'] == 'inline':
                script_parts.append((script['pos'], script['contents']))
            else:
                assert False, 'Invalid internal script type %s' % \
                              script['type']
    else:
        assert False, 'Unsupported file kind: %s' % kind

    _lint_script_parts(script_parts, lint_cache[normpath], _lint_error,
                       conf, import_script)
    return lint_cache[normpath]
def __init__(self, root, is_type_editor):
    """Initialize the project rooted at *root*.

    Construction order of the managers below is significant (see the
    inline comments); do not reorder.

    Args:
        root: path of project root folder
        is_type_editor: bool. see class docstring
    """
    # Absolute, normalized project root.
    self.path = util.normpath(os.path.abspath(root))
    self.is_type_editor = is_type_editor
    self._auto_create_clazz_folder = True
    # Must be the first
    self.event_manager = EventManager()
    self.type_manager = TypeManager()
    self.fs_manager = FileSystemManager(
        self, os.path.join(self.path, const.PROJECT_FOLDER_DATA))
    # should after fs_manager
    self.object_manager = ObjectManager(self)
    # should after object_manager
    self.ref_manager = RefManager()
    # self._langauges = ('en', )
    self._default_language = 'en'
    self._translations = {}        # presumably {language: translation table} - TODO confirm
    self._verifier = None
    self._loading_errors = AttrVerifyLogger()
    self.tags = set()
    self._next_ids = {}  # {clazz_name: next_id}
    self._loaded = False
    self._editor_project = None
def pathto(self, f, cwd=None):
    """Return *f* expressed relative to *cwd* (defaulting to the current
    working directory), normalizing separators when the slash option is
    enabled."""
    if cwd is None:
        cwd = self.getcwd()
    rel = util.pathto(self._root, cwd, f)
    return util.normpath(rel) if self._slash else rel
def doTextEdit(self, url, setCursor=False):
    """Process a textedit link and either highlight the corresponding
    source code or set the cursor to it.

    Returns False if *url* is not a textedit link, True otherwise
    (even when the target document could not be loaded).
    """
    t = textedit.link(url)
    # Only process textedit links
    if not t:
        return False
    filename = util.normpath(t.filename)
    doc = self.document(filename, setCursor)
    if doc:
        cursor = QTextCursor(doc)
        # textedit links use 1-based line numbers; block numbers are 0-based.
        b = doc.findBlockByNumber(t.line - 1)
        p = b.position() + t.column
        cursor.setPosition(p)
        cursors = pointandclick.positions(cursor)
        # Do highlighting if the document is active
        if cursors and doc == self.mainwindow().currentDocument():
            import viewhighlighter
            view = self.mainwindow().currentView()
            viewhighlighter.highlighter(view).highlight(
                self._highlightFormat, cursors, 2, 0)
        # set the cursor and bring the document to front
        if setCursor:
            mainwindow = self.mainwindow()
            mainwindow.setTextCursor(cursor)
            import widgets.blink
            widgets.blink.Blinker.blink_cursor(mainwindow.currentView())
            self.mainwindow().setCurrentDocument(doc)
            mainwindow.activateWindow()
            mainwindow.currentView().setFocus()
    return True
def __init__(self, root, is_type_editor):
    """Set up the project and its managers.

    NOTE: the managers must be created in exactly this order - the
    inline comments below record the dependencies.

    Args:
        root: path of project root folder
        is_type_editor: bool. see class docstring
    """
    self.path = util.normpath(os.path.abspath(root))  # normalized absolute root
    self.is_type_editor = is_type_editor
    self._auto_create_clazz_folder = True
    # Must be the first
    self.event_manager = EventManager()
    self.type_manager = TypeManager()
    self.fs_manager = FileSystemManager(
        self, os.path.join(self.path, const.PROJECT_FOLDER_DATA))
    # should after fs_manager
    self.object_manager = ObjectManager(self)
    # should after object_manager
    self.ref_manager = RefManager()
    # self._langauges = ('en', )
    self._default_language = 'en'
    self._translations = {}
    self._verifier = None
    self._loading_errors = AttrVerifyLogger()
    self.tags = set()
    self._next_ids = {}  # {clazz_name: next_id}
    self._loaded = False
    self._editor_project = None
def tidyprefix(dest, kind, prefix):
    '''Choose the prefix used for member names inside the archive and
    make sure it is safe for consumers of the archive.'''
    if prefix:
        prefix = util.normpath(prefix)
    else:
        # No explicit prefix: derive one from the destination filename.
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        prefix = os.path.basename(dest)
        lowered = prefix.lower()
        # Strip a recognized archive suffix for this kind (e.g. '.tar.gz').
        for suffix in exts.get(kind, []):
            if lowered.endswith(suffix):
                prefix = prefix[:-len(suffix)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # Drop the leading '.' path component if present, so Windows can read the
    # zip files (issue4634)
    if prefix.startswith('./'):
        prefix = prefix[2:]
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise util.Abort(_('archive prefix contains illegal components'))
    return prefix
def doTextEdit(self, url, setCursor=False):
    """Process a textedit link and either highlight the corresponding
    source code or set the cursor to it.

    Returns False when *url* is not a textedit link; True otherwise.
    """
    t = textedit.link(url)
    # Only process textedit links
    if not t:
        return False
    filename = util.normpath(t.filename)
    doc = self.document(filename, setCursor)
    if doc:
        cursor = QTextCursor(doc)
        b = doc.findBlockByNumber(t.line - 1)  # t.line is 1-based
        p = b.position() + t.column
        cursor.setPosition(p)
        cursors = pointandclick.positions(cursor)
        # Do highlighting if the document is active
        if cursors and doc == self.mainwindow().currentDocument():
            import viewhighlighter
            view = self.mainwindow().currentView()
            viewhighlighter.highlighter(view).highlight(self._highlightFormat,
                                                        cursors, 2, 0)
        # set the cursor and bring the document to front
        if setCursor:
            mainwindow = self.mainwindow()
            mainwindow.setTextCursor(cursor)
            import widgets.blink
            widgets.blink.Blinker.blink_cursor(mainwindow.currentView())
            self.mainwindow().setCurrentDocument(doc)
            mainwindow.activateWindow()
            mainwindow.currentView().setFocus()
    return True
def _generate_dependencies(self, solution):
    """Build gyp include references ('relative/path.gyp:name') for every
    project in *solution*, relative to the solution directory."""
    includes = []
    for proj in solution.projects:
        gyp_base, _unused_ext = os.path.splitext(proj.project_file)
        rel = util.normpath(os.path.relpath(gyp_base, solution.solution_dir))
        includes.append(rel + ".gyp:" + proj.name)
    return includes
def _generate_proj_include_dirs(self, project, configurations):
    """Return the normalized include directories shared by all given
    configurations, or None when the option is absent or empty.

    Args:
        project: project whose compile options are queried.
        configurations: configurations that must agree on the value.
    """
    include_dirs = project.compile_options.get_common_value_for_configurations(
        configurations, 'AdditionalIncludeDirectories')
    # Fixed `!= None` / `len(...) > 0` to idiomatic truthiness; an empty
    # list still yields None, matching the original behavior.
    if include_dirs:
        return [util.normpath(d) for d in include_dirs]
    return None
def open(ui, url_, data=None):
    """Open *url_*, converting a bare filesystem path into a file:// URL,
    and return the response from the configured opener."""
    u = util.url(url_)
    if not u.scheme:
        # No scheme: treat it as a local path.
        local = util.normpath(os.path.abspath(url_))
        return opener(ui, None).open('file://' + urllib.pathname2url(local),
                                     data)
    u.scheme = u.scheme.lower()
    url_, authinfo = u.authinfo()
    return opener(ui, authinfo).open(url_, data)
def _normalize(names, default, root, cwd):
    """Split each pattern into (kind, name), rooting glob/relpath
    patterns and normalizing relglob/path ones."""
    pats = []
    for kind, name in (_patsplit(p, default) for p in names):
        if kind in ('glob', 'relpath'):
            name = util.canonpath(root, cwd, name)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        # other kinds (re, relre, ...) pass through untouched
        pats.append((kind, name))
    return pats
def _normalize(names, default, root, cwd, auditor):
    """Normalize each 'kind:pat' entry to a (kind, name) tuple; the
    auditor validates rooted paths."""
    normalized = []
    for kind, name in (_patsplit(p, default) for p in names):
        if kind in ('glob', 'relpath'):
            name = util.canonpath(root, cwd, name, auditor)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        normalized.append((kind, name))
    return normalized
def open(ui, url, data=None):
    """Open *url*, turning scheme-less arguments into file:// URLs, and
    return the opener's response."""
    m = scheme_re.search(url)
    scheme = m.group(1).lower() if m else None
    if scheme:
        url, authinfo = getauthinfo(url)
    else:
        # No URL scheme: interpret as a local filesystem path.
        path = util.normpath(os.path.abspath(url))
        url = 'file://' + urllib.pathname2url(path)
        authinfo = None
    return opener(ui, authinfo).open(url, data)
def _generate_proj_dependencies(self, solution, project):
    """Return gyp dependency references ('relative.gyp:name') for each
    of *project*'s dependency GUIDs found among *solution*'s projects.

    Paths are made relative to the project's own directory.
    """
    dependencies = []
    for dep_guid in project.dependencies:
        for p in solution.projects:
            if p.guid == project.guid:
                # A project never depends on itself.
                continue
            if p.guid == dep_guid:
                # Swap the MSVC project extension for '.gyp'.
                dep_gyp_name, _ = os.path.splitext(p.project_file)  # was unused 'ext'
                dep_gyp_path = dep_gyp_name + '.gyp'
                rel_gyp_path = util.normpath(
                    os.path.relpath(dep_gyp_path, project.project_dir))
                dependencies.append(rel_gyp_path + ":" + p.name)
    return dependencies
def dragElement(self, url): t = textedit.link(url) # Only process textedit links if not t: return False filename = util.normpath(t.filename) doc = self.document(filename, True) if doc: cursor = QTextCursor(doc) b = doc.findBlockByNumber(t.line - 1) p = b.position() + t.column cursor.setPosition(p) self.emitCursor(cursor)
def cursor(self, link, load=False):
    """Returns the destination of a link as a QTextCursor of the destination
    document.

    If load (defaulting to False) is True, the document is loaded if it
    is not yet loaded.  Returns None if the url was not valid or the
    document could not be loaded.
    """
    import qpageview.link
    # Guard clauses: bail out unless this is a Link carrying a textedit URL.
    if not isinstance(link, qpageview.link.Link) or not link.url:
        return
    t = textedit.link(link.url)
    if not t:
        return
    filename = util.normpath(t.filename)
    return super(Links, self).cursor(filename, t.line, t.column, load)
def cursor(self, link, load=False):
    """Returns the destination of a link as a QTextCursor of the destination
    document.

    If load (defaulting to False) is True, the document is loaded if it
    is not yet loaded.  Returns None if the url was not valid or the
    document could not be loaded.
    """
    import popplerqt5
    # Only browse links with a URL can be textedit links.
    if not isinstance(link, popplerqt5.Poppler.LinkBrowse) or not link.url():
        return
    t = textedit.link(link.url())
    if not t:
        return
    filename = util.normpath(t.filename)
    return super(Links, self).cursor(filename, t.line, t.column, load)
def loadpath(path, module_name):
    """Load and return the Python module at *path* as *module_name*.

    Dots in module_name are flattened to underscores.  Both package
    directories (module/__init__.py) and single source files are
    supported.  (Python 2 `except` syntax - this is Py2-era code.)
    """
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    if os.path.isdir(path):
        # module/__init__.py style
        d, f = os.path.split(path)
        fd, fpath, desc = imp.find_module(f, [d])
        return imp.load_module(module_name, fd, fpath, desc)
    else:
        try:
            return imp.load_source(module_name, path)
        except IOError, exc:
            if not exc.filename:
                exc.filename = path  # python does not fill this
            raise
def slotJobOutput(self, message, type):
    """Called whenever the job has output.

    The output is checked for error messages that contain a
    filename:line:column expression; each hit is stored in self._refs
    keyed by the matched url.
    """
    if type == job.STDERR:
        enc = sys.getfilesystemencoding()
        # Match on latin1-encoded bytes (message_re is presumably a
        # bytes pattern - TODO confirm), then decode the captured
        # groups using the filesystem encoding.
        for m in message_re.finditer(message.encode('latin1')):
            url = m.group(1).decode(enc)
            filename = m.group(2).decode(enc)
            filename = util.normpath(filename)
            # group(4) (column) may be absent; default to 0.
            line, column = int(m.group(3)), int(m.group(4) or 0)
            self._refs[url] = Reference(filename, line, column)
def loadpath(path, module_name):
    """Load and return the Python module found at *path*, registered
    under *module_name* (dots flattened to underscores)."""
    module_name = module_name.replace('.', '_')
    path = util.normpath(util.expandpath(path))
    if os.path.isdir(path):
        # Package directory: locate and load its __init__.
        parent, name = os.path.split(path)
        fd, fpath, desc = imp.find_module(name, [parent])
        return imp.load_module(module_name, fd, fpath, desc)
    try:
        return imp.load_source(module_name, path)
    except IOError as exc:
        if not exc.filename:
            exc.filename = path  # python does not fill this
        raise
def readfilename(match):
    """Return the filename captured by *match* (from textedit_match),
    percent-decoded and path-normalized."""
    fname = match.group(1)
    encoded = fname.encode('latin1')
    try:
        encoded = percentcoding.decode(encoded)
    except ValueError:
        pass  # not percent-encoded; keep the raw bytes
    try:
        fname = encoded.decode(sys.getfilesystemencoding())
    except UnicodeError:
        pass  # fall back to the original string
    # normalize path (although this might change a path if it contains
    # symlinks followed by '/../' !
    return util.normpath(fname)
def readfilename(match):
    """Return the filename from the match object resulting from
    textedit_match."""
    fname = match.group(1)
    lat1 = fname.encode('latin1')
    # Undo percent-encoding if present; a ValueError means the bytes
    # were not percent-encoded and are used as-is.
    try:
        lat1 = percentcoding.decode(lat1)
    except ValueError:
        pass
    # Decode with the filesystem encoding; on failure keep the original
    # string unchanged.
    try:
        fname = lat1.decode(sys.getfilesystemencoding())
    except UnicodeError:
        pass
    # normalize path (although this might change a path if it contains
    # symlinks followed by '/../' !
    fname = util.normpath(fname)
    return fname
def links(document):
    """Return the Links object for the poppler *document*, building and
    caching it on first access by scanning every page for textedit URLs.
    """
    try:
        return _cache[document]
    except KeyError:
        # Insert into the cache before populating; the Links context
        # manager presumably defers notifications until filled - confirm.
        l = _cache[document] = Links()
        with l:
            import popplerqt5
            # Poppler documents must be locked during page access.
            with qpageview.locking.lock(document):
                for num in range(document.numPages()):
                    page = document.page(num)
                    for link in page.links():
                        if isinstance(link, popplerqt5.Poppler.LinkBrowse):
                            t = textedit.link(link.url())
                            if t:
                                filename = util.normpath(t.filename)
                                l.add_link(filename, t.line, t.column,
                                           (num, link.linkArea()))
        return l
def links(document):
    """Return (caching on first use) the Links object describing all
    textedit links found in the poppler *document*."""
    try:
        return _cache[document]
    except KeyError:
        l = _cache[document] = Links()
        with l:
            import popplerqt5
            # Hold the document lock while iterating pages.
            with qpopplerview.lock(document):
                for num in range(document.numPages()):
                    page = document.page(num)
                    for link in page.links():
                        if isinstance(link, popplerqt5.Poppler.LinkBrowse):
                            t = textedit.link(link.url())
                            if t:
                                filename = util.normpath(t.filename)
                                l.add_link(filename, t.line, t.column,
                                           (num, link.linkArea()))
        return l
def _normalize(names, default, root, cwd, auditor):
    """Normalize 'kind:pat' patterns to (kind, name) tuples, expanding
    listfile/listfile0 entries recursively.

    Raises:
        util.Abort: when a list file cannot be read.
    """
    pats = []
    for kind, name in [_patsplit(p, default) for p in names]:
        if kind in ('glob', 'relpath'):
            name = util.canonpath(root, cwd, name, auditor)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        elif kind in ('listfile', 'listfile0'):
            # listfile0 entries are NUL-separated; listfile uses newlines.
            delimiter = '\0' if kind == 'listfile0' else '\n'
            try:
                # 'with' closes the handle; the original leaked it via
                # open(name, 'r').read().
                with open(name, 'r') as fp:
                    files = fp.read().split(delimiter)
                files = [f for f in files if f]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % name)
            pats += _normalize(files, default, root, cwd, auditor)
            continue
        pats.append((kind, name))
    return pats
def add_manage_subparser(subparsers):
    """Register the 'manage' subcommand and its arguments on *subparsers*."""
    parser = subparsers.add_parser(
        'manage',
        help=('copy a file to the dotpary directory, replace the original '
              'with a link, and add the new file to the repo (if possible)'))
    add_debug_argument(parser)
    # The positional path is normalized to an absolute path at parse time.
    parser.add_argument(
        'path',
        type=lambda p: util.normpath(p, absolute=True),
        help='the path to the source file to manage')
    parser.add_argument(
        '-f', '--force',
        action='store_true',
        help='overwrite any existing files in the dotparty directory')
    parser.set_defaults(command=dotparty.manage)
def _normalize(self, patterns, default, root, cwd, auditor):
    '''Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded.'''
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in ('glob', 'relpath'):
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ('relglob', 'path'):
            pat = util.normpath(pat)
        elif kind in ('listfile', 'listfile0'):
            # Read the pattern list from a file: NUL-separated for
            # listfile0, line-separated otherwise; blanks are dropped.
            try:
                files = util.readfile(pat)
                if kind == 'listfile0':
                    files = files.split('\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % pat)
            # Recurse; the list file itself becomes the recorded source.
            for k, p, source in self._normalize(files, default, root, cwd,
                                                auditor):
                kindpats.append((k, p, pat))
            continue
        elif kind == 'include':
            try:
                includepats = readpatternfile(pat, self._warn)
                for k, p, source in self._normalize(
                        includepats, default, root, cwd, auditor):
                    kindpats.append((k, p, source or pat))
            except util.Abort as inst:
                # NOTE(review): inst[0] is Python-2-only exception
                # indexing; breaks on Python 3.
                raise util.Abort('%s: %s' % (pat, inst[0]))
            except IOError as inst:
                # Unreadable include files are skipped with a warning,
                # not fatal.
                if self._warn:
                    self._warn(_("skipping unreadable pattern file "
                                 "'%s': %s\n") % (pat, inst.strerror))
            continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat, ''))
    return kindpats
def load_config(user_path=constants.USER_CONFIG_PATH,
                default_path=constants.DEFAULT_CONFIG_PATH):
    '''Load the default dotparty config file, then overlay the user's on
    top.

    Returns the merged config dict; 'ignore' becomes a frozenset of
    expanded globs and 'destination' is path-normalized.
    '''
    # make sure we can load our default config file (include the path in
    # the failure message; the original bare assert gave no context)
    assert os.path.exists(default_path), default_path

    # load the default config (removed the dead `default_config = {}`
    # pre-initialization - json.load always rebinds it)
    with open(default_path) as f:
        default_config = json.load(f)

    # load the user's config if one exists
    user_config = {}
    if os.path.exists(user_path):
        with open(user_path) as f:
            user_config = json.load(f)

    # build a config of the default values custom-merged with the user's
    config = {}
    config.update(default_config)
    config.update(user_config)

    # use the user's ignored values in addition to, not instead of, ours. we
    # need this because we always need to ignore our own files, and it's
    # cleaner to do so through the normal ignore channel than to write custom
    # checks everywhere.
    if 'ignore' in user_config:
        config['ignore'] = frozenset(default_config['ignore'] +
                                     user_config['ignore'])

    # expand globs in the ignored list and root them in the dotparty directory
    config['ignore'] = frozenset(
        util.expand_globs(config['ignore'], root=constants.REPO_DIR))

    # normalize the destination directory
    config['destination'] = util.normpath(config['destination'])

    # TODO: handle packages

    return config
def _normalize(names, default, root, cwd, auditor):
    """Turn 'kind:pat' entries into (kind, name) tuples, expanding
    listfile/listfile0 entries recursively."""
    pats = []
    for kind, name in (_patsplit(p, default) for p in names):
        if kind in ('listfile', 'listfile0'):
            # Patterns come from a file: NUL- or newline-separated.
            try:
                content = util.readfile(name)
                entries = (content.split('\0') if kind == 'listfile0'
                           else content.splitlines())
                entries = [e for e in entries if e]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % name)
            pats.extend(_normalize(entries, default, root, cwd, auditor))
            continue
        if kind in ('glob', 'relpath'):
            name = scmutil.canonpath(root, cwd, name, auditor)
        elif kind in ('relglob', 'path'):
            name = util.normpath(name)
        pats.append((kind, name))
    return pats
def _normalize(self, patterns, default, root, cwd, auditor):
    '''Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded.'''
    kindpats = []
    for kind, pat in [_patsplit(p, default) for p in patterns]:
        if kind in ('glob', 'relpath'):
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ('relglob', 'path'):
            pat = util.normpath(pat)
        elif kind in ('listfile', 'listfile0'):
            # Expand a file of patterns (NUL- or newline-separated);
            # empty entries are discarded.
            try:
                files = util.readfile(pat)
                if kind == 'listfile0':
                    files = files.split('\0')
                else:
                    files = files.splitlines()
                files = [f for f in files if f]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % pat)
            for k, p, source in self._normalize(files, default, root, cwd,
                                                auditor):
                # record the list file as the pattern's source
                kindpats.append((k, p, pat))
            continue
        elif kind == 'include':
            try:
                includepats = readpatternfile(pat, self._warn)
                for k, p, source in self._normalize(includepats, default,
                                                    root, cwd, auditor):
                    kindpats.append((k, p, source or pat))
            except util.Abort as inst:
                # NOTE(review): inst[0] is Py2-only exception indexing.
                raise util.Abort('%s: %s' % (pat, inst[0]))
            except IOError as inst:
                if self._warn:
                    # best-effort: warn and skip unreadable include files
                    self._warn(_("skipping unreadable pattern file "
                                 "'%s': %s\n") % (pat, inst.strerror))
            continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat, ''))
    return kindpats
def tidyprefix(dest, kind, prefix):
    """choose prefix to use for names in archive.

    make sure prefix is safe for consumers."""
    if prefix:
        prefix = util.normpath(prefix)
    else:
        # derive a prefix from the destination filename
        if not isinstance(dest, str):
            raise ValueError("dest must be string if no prefix")
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        # strip a known archive suffix for this archive kind
        for sfx in exts.get(kind, []):
            if lower.endswith(sfx):
                prefix = prefix[: -len(sfx)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith("/"):
        prefix += "/"
    # reject escapes above the archive root
    if prefix.startswith("../") or os.path.isabs(lpfx) or "/../" in prefix:
        raise util.Abort(_("archive prefix contains illegal components"))
    return prefix
def tidyprefix(dest, prefix, suffixes):
    '''choose prefix to use for names in archive.

    make sure prefix is safe for consumers.'''
    if prefix:
        prefix = util.normpath(prefix)
    else:
        # no explicit prefix: base it on the destination filename
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        prefix = os.path.basename(dest)
        lower = prefix.lower()
        # strip any of the recognized archive suffixes
        for sfx in suffixes:
            if lower.endswith(sfx):
                prefix = prefix[:-len(sfx)]
                break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    # forbid prefixes that would escape the archive root
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise util.Abort(_('archive prefix contains illegal components'))
    return prefix
def _expandsubinclude(kindpats, root):
    '''Returns the list of subinclude matchers and the kindpats without the
    subincludes in it.'''
    relmatchers = []
    other = []
    for kind, pat, source in kindpats:
        if kind == 'subinclude':
            # the subinclude path is resolved relative to the file that
            # declared it (its source)
            sourceroot = pathutil.dirname(util.normpath(source))
            pat = util.pconvert(pat)
            path = pathutil.join(sourceroot, pat)
            newroot = pathutil.dirname(path)
            relmatcher = match(newroot, '', [], ['include:%s' % path])
            # prefix under which this matcher applies, relative to root
            prefix = pathutil.canonpath(root, root, newroot)
            if prefix:
                prefix += '/'
            relmatchers.append((prefix, relmatcher))
        else:
            other.append((kind, pat, source))
    return relmatchers, other
def _normalize(patterns, default, root, cwd, auditor):
    '''Convert 'kind:pat' from the patterns list to tuples with kind and
    normalized and rooted patterns and with listfiles expanded.'''
    kindpats = []
    for kind, pat in (_patsplit(p, default) for p in patterns):
        if kind in ('glob', 'relpath'):
            pat = pathutil.canonpath(root, cwd, pat, auditor)
        elif kind in ('relglob', 'path'):
            pat = util.normpath(pat)
        elif kind in ('listfile', 'listfile0'):
            try:
                text = util.readfile(pat)
                entries = (text.split('\0') if kind == 'listfile0'
                           else text.splitlines())
                entries = [e for e in entries if e]
            except EnvironmentError:
                raise util.Abort(_("unable to read file list (%s)") % pat)
            kindpats.extend(_normalize(entries, default, root, cwd, auditor))
            continue
        # else: re or relre - which cannot be normalized
        kindpats.append((kind, pat))
    return kindpats
def parse(self, file):
    """Parse the .sln file at *file*, recording its normalized absolute
    path on the solution and feeding each line to _append.
    """
    self._solution._file = util.normpath(os.path.abspath(file))
    # 'with' guarantees the handle is closed; the original left the
    # file object from open() dangling.
    with open(file, 'r') as fp:
        for line in fp:
            self._append(line)
def _append(self, line):
    """Parse one line of a .sln file and update the parser state.

    Recognizes Project/EndProject, GlobalSection/EndGlobalSection and
    ProjectSection/EndProjectSection markers; any other line is
    interpreted according to the section currently on top of the status
    stack.

    Raises:
        ValueError: when an End* marker does not match the open section.
            (The original used `raise "Invalid .sln format"` - raising a
            string is a TypeError at runtime.)
    """
    m = self._re_project.search(line)
    if m is not None:  # was `not m == None`
        file = util.normpath(self._solution.solution_dir + '/' +
                             m.group('project_file'))
        classid = m.group('classid')
        guid = m.group('project_classid')
        name = m.group('project_name')
        self._project = Project(file, name, guid)
        status = Solution.Parser.Status(
            Solution.Parser.StatusType.kProject, classid)
        self._status.push(status)
        return

    m = self._re_endproject.search(line)
    if m is not None:
        self._solution._projects.append(self._project)
        self._project = None
        if self._status.peek().status_type() != \
                Solution.Parser.StatusType.kProject:
            raise ValueError("Invalid .sln format")
        self._status.pop()
        return

    m = self._re_globalsection.search(line)
    if m is not None:
        section_name = m.group('section_name')
        status = Solution.Parser.Status(
            Solution.Parser.StatusType.kGlobalSection, section_name)
        self._status.push(status)
        return

    m = self._re_endglobalsection.search(line)
    if m is not None:
        if self._status.peek().status_type() != \
                Solution.Parser.StatusType.kGlobalSection:
            raise ValueError("Invalid .sln format")
        self._status.pop()
        return

    m = self._re_projectsection.search(line)
    if m is not None:
        section_name = m.group('section_name')
        status = Solution.Parser.Status(
            Solution.Parser.StatusType.kProjectSection, section_name)
        self._status.push(status)
        return

    m = self._re_endprojectsection.search(line)
    if m is not None:
        if self._status.peek().status_type() != \
                Solution.Parser.StatusType.kProjectSection:
            raise ValueError("Invalid .sln format")
        self._status.pop()
        return

    current_status = self._status.peek()
    if current_status is None:
        # TODO: error handling.  self._status should have 'root' status.
        return
    if current_status.status_type() == \
            Solution.Parser.StatusType.kGlobalSection:
        if current_status.name() == 'SolutionConfigurationPlatforms':
            # 'key = value'; the original bound these as rhs/lhs backwards.
            key, value = line.split('=')
            configuration = Configuration.create_from_string(value.strip())
            self._solution._configurations.append(configuration)
    elif current_status.status_type() == \
            Solution.Parser.StatusType.kProjectSection:
        if current_status.name() == 'ProjectDependencies':
            key, value = line.split('=')
            # (removed an unused `guid = self._project.guid` binding)
            depends = value.strip()
            self._project._dependencies.append(depends)
from __future__ import unicode_literals

import os

import util

# the current version of dotparty
VERSION = (0, 0, 0)

# the git remote where the dotparty repository lives. used for updating
# dotparty to the latest version.
GIT_REMOTE = 'https://github.com/jasontbradshaw/dotparty.git'

# the directories that the dotparty files and script live in
# NOTE: these assume that these files are within the script directory!
REPO_DIR = os.path.dirname(util.normpath(os.path.join(__file__, '../')))
SCRIPT_DIR = os.path.dirname(util.normpath(__file__))

# config file paths
# NOTE(review): util.normpath presumably expands '~' to the home
# directory (os.path.normpath does not) - confirm.
MACHINE_ID_PATH = util.normpath('~/.party-machine')
USER_CONFIG_PATH = util.normpath('~/.party.json')
DEFAULT_CONFIG_PATH = util.normpath(
    os.path.join(SCRIPT_DIR, 'party-default.json'))

# the characters used in our special file names
DOT_CHARACTER = '_'
MACHINE_SEPARATOR_CHARACTER = '@'