def havemarks(*args, **kwargs):
	'''Assert that every argument (recursively) carries a ``mark`` attribute

	Dict arguments have both their keys and their values checked; list
	arguments are checked item by item.

	:param kwargs:
		Only ``origin`` is used (keyword taken from ``kwargs`` for Python 2
		compatibility): a path prefix embedded in the failure message so the
		offending value can be located.

	:raises AssertionError: if some inspected value lacks ``mark``.
	'''
	origin = kwargs.get('origin', '')
	for idx, value in enumerate(args):
		if not hasattr(value, 'mark'):
			raise AssertionError('Value #{0}/{1} ({2!r}) has no attribute `mark`'.format(origin, idx, value))
		if isinstance(value, dict):
			for key, val in value.items():
				havemarks(key, val, origin=(origin + '[' + unicode(idx) + ']/' + unicode(key)))
		elif isinstance(value, list):
			havemarks(*value, origin=(origin + '[' + unicode(idx) + ']'))
def havemarks(*args, **kwargs):
	# Debug helper: assert that each positional argument has a `mark`
	# attribute, recursing into dict keys/values and list items.  `origin`
	# (read from kwargs for Python 2 compatibility) is a path string that is
	# embedded in the failure message to locate the offending value.
	origin = kwargs.get('origin', '')
	for i, v in enumerate(args):
		if not hasattr(v, 'mark'):
			raise AssertionError(
				'Value #{0}/{1} ({2!r}) has no attribute `mark`'.format(
					origin, i, v))
		if isinstance(v, dict):
			# Both keys and values must carry marks; extend origin with the
			# argument index and the key for a readable failure path.
			for key, val in v.items():
				havemarks(key, val, origin=(origin + '[' + unicode(i) + ']/' + unicode(key)))
		elif isinstance(v, list):
			havemarks(*v, origin=(origin + '[' + unicode(i) + ']'))
def check_tuple(self, value, context_mark, data, context, echoerr, start, end):
	'''Check that given value is a list with items matching specifications

	Item number ``i`` of the value list is matched against the
	specification stored at index ``start + i``.

	:param int start: Index of the first specification to use.
	:param int end: Index one past the last specification to use.

	:return: proceed, hadproblem.
	'''
	havemarks(value)
	hadproblem = False
	for idx, (item, spec) in enumerate(zip(value, self.specs[start:end])):
		proceed, item_had_problem = spec.match(
			item,
			value.mark,
			data,
			context.enter_item('tuple item ' + unicode(idx), item),
			echoerr
		)
		if item_had_problem:
			hadproblem = True
		if not proceed:
			return False, hadproblem
	return True, hadproblem
def __init__(self, stream):
	# Reader over a character stream: decodes raw input to unicode and
	# tracks index/line/column for error reporting (PyYAML-style reader,
	# restricted here to UTF-8 input only).
	# Default every attribute first so the object is safe to inspect even
	# if the setup below fails partway through.
	self.name = None
	self.stream = None
	self.stream_pointer = 0
	self.eof = True
	self.buffer = ''
	self.pointer = 0
	self.full_buffer = unicode('')
	self.full_pointer = 0
	self.raw_buffer = None
	# Only UTF-8 is supported: decode incrementally with codecs.
	self.raw_decode = codecs.utf_8_decode
	self.encoding = 'utf-8'
	self.index = 0
	self.line = 0
	self.column = 0
	self.stream = stream
	self.name = getattr(stream, 'name', '<file>')
	self.eof = False
	self.raw_buffer = None
	# Prime the raw buffer with at least two bytes (or hit EOF), then
	# decode the first character so peek() works immediately.
	while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
		self.update_raw()
	self.update(1)
def name(self):
	'''Return human-readable name of the current revision

	Tries, in order:

	#. Current bookmark name.
	#. Current tag.
	#. Branch name, when the current changeset is the tip of its branch.
	#. Revision number.
	'''
	repo = self._repo()
	# Older Mercurial versions may lack this attribute; getattr swallows
	# the AttributeError just like the try/except it replaces.
	bookmark = getattr(repo, '_bookmarkcurrent', None)
	if bookmark:
		return bookmark
	cs = repo['.']
	try:
		return cs.tags()[0]
	except IndexError:
		pass
	branch = cs.branch()
	if repo.branchtip(branch) == cs.node():
		return branch
	return unicode(cs.rev())
def process_csv_buffer(pl, buffer, line, col, display_name):
	# Compute (column number, column name) for the csv cell under the
	# cursor.  Caches the detected dialect per buffer number; returns
	# (None, None) when the dialect cannot be detected at all.
	global csv_cache
	if csv_cache is None:
		# Lazily create the per-buffer cache of
		# (dialect, has_header, first_line) triples.
		csv_cache = register_buffer_cache(
			defaultdict(lambda: (None, None, None)))
	try:
		cur_first_line = buffer[0]
	except UnicodeDecodeError:
		# Undecodable buffer contents: ask Vim for a sanitized first line.
		cur_first_line = vim.eval('strtrans(getline(1))')
	dialect, has_header, first_line = csv_cache[buffer.number]
	if dialect is None or (cur_first_line != first_line and display_name == 'auto'):
		# (Re)detect the dialect: nothing cached yet, or the first line
		# changed while the displayed name is chosen automatically.
		try:
			text = '\n'.join(buffer[:CSV_SNIFF_LINES])
		except UnicodeDecodeError:  # May happen in Python 3
			text = vim.eval(
				'join(map(getline(1, {0}), "strtrans(v:val)"), "\\n")'.format(
					CSV_SNIFF_LINES))
		try:
			dialect, has_header = detect_text_csv_dialect(text, display_name)
		except csv.Error as e:
			pl.warn('Failed to detect csv format: {0}', str(e))
			# Try detecting using three lines only:
			if line == 1:
				rng = (0, line + 2)
			elif line == len(buffer):
				rng = (line - 3, line)
			else:
				rng = (line - 2, line + 1)
			try:
				dialect, has_header = detect_text_csv_dialect(
					'\n'.join(buffer[rng[0]:rng[1]]),
					display_name,
					header_text='\n'.join(buffer[:4]),
				)
			except csv.Error as e:
				pl.error('Failed to detect csv format: {0}', str(e))
				return None, None
	if len(buffer) > 2:
		# Do not cache detection results for tiny buffers: unreliable.
		csv_cache[buffer.number] = dialect, has_header, cur_first_line
	# Parse up to CSV_PARSE_LINES lines ending at the cursor (the current
	# line truncated at the cursor column) and count fields in the last row.
	column_number = len(
		read_csv(
			buffer[max(0, line - CSV_PARSE_LINES):line - 1] + [buffer[line - 1][:col]],
			dialect=dialect,
			fin=list,
		)[-1]) or 1
	if has_header:
		try:
			header = read_csv(buffer[0:1], dialect=dialect)
		except UnicodeDecodeError:
			header = read_csv([vim.eval('strtrans(getline(1))')], dialect=dialect)
		column_name = header[column_number - 1]
	else:
		column_name = None
	return unicode(column_number), column_name
def check_list(self, value, context_mark, data, context, echoerr, item_func, msg_func):
	'''Check that each value in the list matches given specification

	:param function item_func:
		Callable like ``func`` from :py:meth:`Spec.check_func`. Unlike
		``func`` this callable is called for each value in the list and may
		be a :py:class:`Spec` object index.
	:param func msg_func:
		Callable like ``msg_func`` from :py:meth:`Spec.check_func`. Should
		accept one problematic item and is not used for :py:class:`Spec`
		object indices in ``item_func`` method.

	:return: proceed, hadproblem.
	'''
	havemarks(value)
	i = 0
	hadproblem = False
	for item in value:
		havemarks(item)
		if isinstance(item_func, int):
			# item_func is an index into self.specs: delegate matching to
			# that specification object (it reports its own problems).
			spec = self.specs[item_func]
			proceed, fhadproblem = spec.match(
				item,
				value.mark,
				data,
				context.enter_item('list item ' + unicode(i), item),
				echoerr
			)
		else:
			proceed, echo, fhadproblem = item_func(item, data, context, echoerr)
			# Only report here when the callable asked for it (echo) and
			# the item actually had a problem.
			if echo and fhadproblem:
				echoerr(context=self.cmsg.format(key=context.key + '/list item ' + unicode(i)),
				        context_mark=value.mark,
				        problem=msg_func(item),
				        problem_mark=item.mark)
		if fhadproblem:
			hadproblem = True
		if not proceed:
			return proceed, hadproblem
		i += 1
	return True, hadproblem
def process_csv_buffer(pl, buffer, line, col, display_name):
	# Compute (column number, column name) for the csv cell under the
	# cursor.  Caches the detected dialect per buffer number; returns
	# (None, None) when the dialect cannot be detected at all.
	global csv_cache
	if csv_cache is None:
		# Lazily create the per-buffer cache of
		# (dialect, has_header, first_line) triples.
		csv_cache = register_buffer_cache(defaultdict(lambda: (None, None, None)))
	try:
		cur_first_line = buffer[0]
	except UnicodeDecodeError:
		# Undecodable buffer contents: ask Vim for a sanitized first line.
		cur_first_line = vim.eval('strtrans(getline(1))')
	dialect, has_header, first_line = csv_cache[buffer.number]
	if dialect is None or (cur_first_line != first_line and display_name == 'auto'):
		# (Re)detect the dialect: nothing cached yet, or the first line
		# changed while the displayed name is chosen automatically.
		try:
			text = '\n'.join(buffer[:CSV_SNIFF_LINES])
		except UnicodeDecodeError:  # May happen in Python 3
			text = vim.eval('join(map(getline(1, {0}), "strtrans(v:val)"), "\\n")'.format(CSV_SNIFF_LINES))
		try:
			dialect, has_header = detect_text_csv_dialect(text, display_name)
		except csv.Error as e:
			pl.warn('Failed to detect csv format: {0}', str(e))
			# Try detecting using three lines only:
			if line == 1:
				rng = (0, line + 2)
			elif line == len(buffer):
				rng = (line - 3, line)
			else:
				rng = (line - 2, line + 1)
			try:
				dialect, has_header = detect_text_csv_dialect(
					'\n'.join(buffer[rng[0]:rng[1]]),
					display_name,
					header_text='\n'.join(buffer[:4]),
				)
			except csv.Error as e:
				pl.error('Failed to detect csv format: {0}', str(e))
				return None, None
	if len(buffer) > 2:
		# Do not cache detection results for tiny buffers: unreliable.
		csv_cache[buffer.number] = dialect, has_header, cur_first_line
	# Parse up to CSV_PARSE_LINES lines ending at the cursor (current line
	# truncated at the cursor column) and count fields in the last row.
	column_number = len(read_csv(
		buffer[max(0, line - CSV_PARSE_LINES):line - 1] + [buffer[line - 1][:col]],
		dialect=dialect,
		fin=list,
	)[-1]) or 1
	if has_header:
		try:
			header = read_csv(buffer[0:1], dialect=dialect)
		except UnicodeDecodeError:
			header = read_csv([vim.eval('strtrans(getline(1))')], dialect=dialect)
		column_name = header[column_number - 1]
	else:
		column_name = None
	return unicode(column_number), column_name
def scan_flow_scalar(self):
	'''Scan a quoted (flow) scalar and return the corresponding token

	Indentation rules are deliberately relaxed here: the opening and the
	closing quote unambiguously delimit the scalar, so this scanner is less
	restrictive than the specification requires.  It only has to make sure
	that document separators do not appear inside the scalar.
	'''
	pieces = []
	start_mark = self.get_mark()
	# The character that opened the scalar also terminates it.
	quote = self.peek()
	self.forward()
	pieces.extend(self.scan_flow_scalar_non_spaces(start_mark))
	while self.peek() != quote:
		pieces.extend(self.scan_flow_scalar_spaces(start_mark))
		pieces.extend(self.scan_flow_scalar_non_spaces(start_mark))
	# Skip the closing quote.
	self.forward()
	end_mark = self.get_mark()
	return tokens.ScalarToken(unicode().join(pieces), False, start_mark, end_mark, '"')
def scan_flow_scalar(self):
	# See the specification for details.
	# Note that we lose indentation rules for quoted scalars.  Quoted
	# scalars don't need to adhere to indentation because " and ' clearly
	# mark the beginning and the end of them.  Therefore we are less
	# restrictive than the specification requires.  We only need to check
	# that document separators are not included in scalars.
	chunks = []
	start_mark = self.get_mark()
	# The character that opened the scalar also terminates it.
	quote = self.peek()
	self.forward()
	chunks.extend(self.scan_flow_scalar_non_spaces(start_mark))
	while self.peek() != quote:
		chunks.extend(self.scan_flow_scalar_spaces(start_mark))
		chunks.extend(self.scan_flow_scalar_non_spaces(start_mark))
	# Skip the closing quote.
	self.forward()
	end_mark = self.get_mark()
	return tokens.ScalarToken(unicode().join(chunks), False, start_mark, end_mark, '"')
def check_highlight_groups(hl_groups, data, context, echoerr):
	'''Check a list of fallback highlight groups

	The list is only a problem when *every* group in it is undefined in
	some colorscheme; in that case one summary error plus one error per
	group is reported.

	:return: always ``(True, False, hadproblem)``.
	'''
	havemarks(hl_groups)
	results = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups]
	if not all(results):
		# At least one group is defined everywhere: the fallback chain works.
		return True, False, False
	err_context = 'Error while checking theme (key {key})'.format(key=context.key)
	echoerr(
		context=err_context,
		problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format(
			list_sep.join((unicode(h) for h in hl_groups))),
		problem_mark=hl_groups.mark
	)
	for missing_in, hl_group in zip(results, hl_groups):
		echoerr(
			context=err_context,
			problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
				hl_group, list_sep.join(missing_in)),
			problem_mark=hl_group.mark
		)
	return True, False, True
def formatvalue(val):
	'''Format an attribute value for display as ``=value``

	Byte strings are decoded as UTF-8 and shown quoted with backslashes and
	double quotes escaped; any other value is shown via ``repr()``.
	'''
	if type(val) is str:
		# Escape backslashes BEFORE quotes.  The previous order escaped
		# quotes first and then doubled the backslash it had just inserted,
		# turning `"` into `\\"` instead of `\"`.
		return '="' + unicode(val, 'utf-8').replace('\\', '\\\\').replace(
			'"', '\\"') + '"'
	else:
		return '=' + repr(val)
vim_encoding = get_vim_encoding()

# Map Python types to functions that render a value of that type as bytes
# containing an equivalent Vim literal.
python_to_vim_types = {
	unicode: (
		# Vim single-quoted string: the only escape is doubling the quote.
		lambda o: b'\'' + (o.translate({
			ord('\''): '\'\'',
		}).encode(vim_encoding)) + b'\''
	),
	list: (
		# Vim list literal: convert each element recursively.
		lambda o: b'[' + (
			b','.join((python_to_vim(i) for i in o))
		) + b']'
	),
	bytes: (lambda o: b'\'' + o.replace(b'\'', b'\'\'') + b'\''),
	# Python 2: str() already yields bytes; Python 3: encode explicitly.
	int: (str if str is bytes else (lambda o: unicode(o).encode('ascii'))),
}
# Floats use the same textual conversion as ints.
python_to_vim_types[float] = python_to_vim_types[int]


def python_to_vim(o):
	# Convert a Python value into bytes holding an equivalent Vim literal.
	return python_to_vim_types[type(o)](o)


if sys.version_info < (3,):
	def str_to_bytes(s):
		# Python 2: str is already bytes.
		return s

	def unicode_eval(expr):
		# Evaluate a Vim expression and decode the (bytes) result.
		ret = vim.eval(expr)
		return ret.decode(vim_encoding, 'powerline_vim_strtrans_error')
try:
	vim_encoding = vim.eval('&encoding')
except AttributeError:
	# vim module is a non-functional stub (e.g. outside Vim): assume UTF-8.
	vim_encoding = 'utf-8'

# Map Python types to functions that render a value of that type as bytes
# containing an equivalent Vim literal.
python_to_vim_types = {
	unicode: (
		# Vim single-quoted string: the only escape is doubling the quote.
		lambda o: b'\'' + (o.translate({
			ord('\''): '\'\'',
		}).encode(vim_encoding)) + b'\''
	),
	bytes: (lambda o: b'\'' + o.replace(b'\'', b'\'\'') + b'\''),
	# Python 2: str() already yields bytes; Python 3: encode explicitly.
	int: (str if str is bytes else (lambda o: unicode(o).encode('ascii'))),
}
# Floats use the same textual conversion as ints.
python_to_vim_types[float] = python_to_vim_types[int]


def python_to_vim(o):
	# Convert a Python value into bytes holding an equivalent Vim literal.
	return python_to_vim_types[type(o)](o)


if sys.version_info < (3,):
	def str_to_bytes(s):
		# Python 2: str is already bytes.
		return s

	def unicode_eval(expr):
		# Evaluate a Vim expression and decode the (bytes) result.
		ret = vim.eval(expr)
		return ret.decode(vim_encoding, 'powerline_vim_strtrans_error')
def arg_to_unicode(s):  # NOQA
	'''Coerce ``s`` to unicode, decoding bytes with the module encoding'''
	if isinstance(s, unicode):
		return s
	return unicode(s, encoding, 'replace')
def arg_to_unicode(s):
	# Coerce to unicode: decode byte strings using the module-level
	# `encoding` (replacing undecodable bytes); unicode passes through.
	return unicode(s, encoding, "replace") if not isinstance(s, unicode) else s  # NOQA
def short(self):
	'''Return short form of the current revision: its revision number'''
	dirstate_parent = self._repo()['.']
	return unicode(dirstate_parent.rev())
def short(self):
	'''Return short form of the current revision: the branch revno'''
	branch = self._wt().branch
	return unicode(branch.revno())
def formatvalue(val):
	'''Format an attribute value for display as ``=value``

	Byte strings are decoded as UTF-8 and shown quoted with backslashes and
	double quotes escaped; any other value is shown via ``repr()``.
	'''
	if type(val) is str:
		# Escape backslashes BEFORE quotes.  The previous order escaped
		# quotes first and then doubled the backslash it had just inserted,
		# turning `"` into `\\"` instead of `\"`.
		return '="' + unicode(val, 'utf-8').replace('\\', '\\\\').replace('"', '\\"') + '"'
	else:
		return '=' + repr(val)
def join(self, iterable):
	'''Join items of ``iterable``, coercing each item to unicode first'''
	return super(JStr, self).join(map(unicode, iterable))