def param_repr(p):
    if isinstance(p, (int, float)):
        return ustr(p)
    elif isinstance(p, strtype) and p.isalnum():
        return ustr(p)
    else:
        return urepr(p)
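# Illustrative sketch (not part of the original module): the formatting that
# param_repr above is expected to produce, assuming ustr()/urepr() behave like
# str()/repr() for these inputs.
#
#   param_repr(3)       -> '3'        numbers render bare
#   param_repr(7.5)     -> '7.5'
#   param_repr('ABC')   -> 'ABC'      purely alphanumeric strings render bare
#   param_repr('a b')   -> "'a b'"    anything else is quoted via urepr()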
def __str__(self):
    comments = self.comments_str()

    params = ', '.join(
        self.param_repr(p) for p in self.params
    ) if self.params else ''

    kwparams = ''
    if self.kwparams:
        kwparams = ', '.join(
            '%s=%s' % (k, self.param_repr(v))
            for (k, v) in self.kwparams.items()
        )

    # wrap positional and/or keyword parameters in parentheses
    if params and kwparams:
        params = '(%s, %s)' % (params, kwparams)
    elif kwparams:
        params = '(%s)' % (kwparams)
    elif params:
        params = '(%s)' % params

    base = ' < %s' % ustr(self.base.name) if self.base else ''

    return trim(self.str_template).format(
        name=self.name,
        base=base,
        params=params,
        exp=indent(str(self.exp)),
        comments=comments,
    )
def test_numbers_and_unicode(self):
    grammar = '''
        rúle(1, -23, 4.56, 7.89e-11, 0xABCDEF, Añez)
            =
            'a'
            ;
        '''

    rule2 = '''
        rulé(Añez)
            =
            '\\xf1'
            ;
        '''

    rule3 = '''
        rúlé(Añez)
            =
            'ñ'
            ;
        '''

    if PY3:
        grammar += rule3
    else:
        grammar += rule2

    model = genmodel("test", grammar)
    self.assertEqual(trim(grammar), ustr(model))
def test_patterns_with_newlines(self):
    grammar = '''
        start
            =
            blanklines $
            ;

        blanklines
            =
            blankline [blanklines]
            ;

        blankline
            =
            /^[^\n]*\n?$/
            ;

        blankline2
            =
            ?/^[^\n]*\n?$/?
            ;
        '''
    model = genmodel("test", grammar)
    ast = model.parse('\n\n')
    self.assertEqual('', ustr(ast))
def __str__(self):
    exp = ustr(self.exp)
    template = '[%s]'
    if isinstance(self.exp, Choice):
        template = trim(self.str_template)
    elif isinstance(self.exp, Group):
        exp = self.exp.exp
    return template % exp
def __str__(self):
    comments = self.comments_str()
    seq = [ustr(s) for s in self.sequence]
    single = ' '.join(seq)
    if len(single) <= PEP8_LLEN and len(single.splitlines()) <= 1:
        return comments + single
    else:
        return comments + '\n'.join(seq)
def test_36_params_and_keyword_params(self):
    grammar = '''
        rule(A, kwdB=B)
            =
            'a'
            ;
        '''
    model = genmodel("test", grammar)
    self.assertEqual(trim(grammar), ustr(model))
def __str__(self):
    # render the grammar directives first, then the rules
    directives = ''
    if 'whitespace' in self.directives:
        directives += '@@whitespace :: /%s/\n' % self.directives['whitespace']
    if 'nameguard' in self.directives:
        directives += '@@nameguard :: %s\n' % self.directives['nameguard']
    if 'left_recursion' in self.directives:
        directives += '@@left_recursion :: %s\n' % self.directives['left_recursion']
    if 'comments' in self.directives:
        directives += '@@comments :: /%s/\n' % ustr(self.directives['comments'])
    if 'eol_comments' in self.directives:
        directives += '@@eol_comments :: /%s/\n' % self.directives['eol_comments']
    directives = directives + '\n' if directives else ''

    rules = (
        '\n\n'.join(ustr(rule) for rule in self.rules)
    ).rstrip() + '\n'
    return directives + rules
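# Illustrative sketch (hypothetical values, derived from the branches above):
# with self.directives == {'whitespace': r'\s+', 'nameguard': 'False'} the
# rendered grammar would begin with
#
#   @@whitespace :: /\s+/
#   @@nameguard :: False
#
# followed by a blank line and then the rules, separated by blank lines.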
def __str__(self):
    pattern = ustr(self.pattern)
    if '/' not in pattern:
        template = '/%s/'
        return template % pattern
    else:
        template = '?/%s/?'
    result = template % pattern
    if result.count('?') % 2:
        result += '?'  # for the VIM syntax
    return result
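# Illustrative sketch (hypothetical, derived from the branches above): how a
# pattern is expected to render.
#
#   pattern r'\d+'   -> /\d+/        plain form when the regex has no '/'
#   pattern r'a/b'   -> ?/a/b/?      ?/.../? form when the regex contains '/'
#   pattern r'a?/b'  -> ?/a?/b/??    a trailing '?' keeps the '?' count even,
#                                    which helps the VIM syntax highlighting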
def parse(self, ctx):
    with ctx._choice():
        for o in self.options:
            with ctx._option():
                ctx.last_node = o.parse(ctx)
                return ctx.last_node

        # none of the options matched
        lookahead = ' '.join(ustr(urepr(f[0])) for f in self.lookahead if f)
        if lookahead:
            ctx._error('expecting one of {%s}' % lookahead)
        ctx._error('no available options')
def __str__(self):
    options = [ustr(o) for o in self.options]

    multi = any(len(o.splitlines()) > 1 for o in options)
    single = ' | '.join(o for o in options)

    if multi:
        return '\n|\n'.join(indent(o) for o in options)
    elif len(options) and len(single) > PEP8_LLEN:
        return ' ' + '\n| '.join(o for o in options)
    else:
        return single
def render(item, join='', **fields):
    """Render the given item"""
    if item is None:
        return ''
    elif isinstance(item, strtype):
        return item
    elif isinstance(item, Renderer):
        return item.render(join=join, **fields)
    elif isiter(item):
        return join.join(
            render(e, **fields) for e in iter(item) if e is not None
        )
    elif isinstance(item, (int, float)):
        return item
    else:
        return ustr(item)
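# A minimal usage sketch (hypothetical, not in the original source), assuming
# isiter() treats lists as iterable and strtype matches ordinary strings:
#
#   render(None)                        -> ''
#   render('abc')                       -> 'abc'
#   render(['a', None, 'b'], join='-')  -> 'a-b'   None items are skipped
#   render(42)                          -> 42      numbers pass through as-is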
def __init__(self, text,
             filename=None,
             whitespace=None,
             tabwidth=None,
             comments_re=None,
             eol_comments_re=None,
             ignorecase=False,
             trace=False,
             nameguard=None,
             comment_recovery=False,
             **kwargs):
    self.original_text = text
    self.text = ustr(text)
    self.filename = filename or ''

    self.whitespace = whitespace
    self.tabwidth = tabwidth
    self.comments_re = comments_re
    self.eol_comments_re = eol_comments_re
    self.ignorecase = ignorecase
    self.trace = trace
    self.nameguard = (nameguard
                      if nameguard is not None
                      else bool(self.whitespace_re))
    self.comment_recovery = comment_recovery

    # position and line/comment bookkeeping
    self._pos = 0
    self._len = 0
    self._linecount = 0
    self._line_index = []
    self._comment_index = []

    self._preprocess()
    self._linecache = []
    self._build_line_cache()
    self._comment_index = [[] for _ in self._line_index]
    self._len = len(self.text)
    self._re_cache = {}
def test_based_rule(self):
    grammar = '''\
        start
            =
            b $
            ;

        a
            =
            @:'a'
            ;

        b < a
            =
            {@:'b'}
            ;
        '''
    model = genmodel("test", grammar)
    ast = model.parse("abb", nameguard=False)
    self.assertEqual(['a', 'b', 'b'], ast)
    self.assertEqual(trim(grammar), ustr(model))
def test_36_param_combinations(self):
    def assert_equal(target, value):
        self.assertEqual(target, value)

    class TC36Semantics(object):
        """Check all rule parameters for expected types and values"""

        def rule_positional(self, ast, p1, p2, p3, p4):
            assert_equal("ABC", p1)
            assert_equal(123, p2)
            assert_equal('=', p3)
            assert_equal("+", p4)
            return ast

        def rule_keyword(self, ast, k1, k2, k3, k4):
            assert_equal("ABC", k1)
            assert_equal(123, k2)
            assert_equal('=', k3)
            assert_equal('+', k4)
            return ast

        def rule_all(self, ast, p1, p2, p3, p4, k1, k2, k3, k4):
            assert_equal("DEF", p1)
            assert_equal(456, p2)
            assert_equal('=', p3)
            assert_equal("+", p4)
            assert_equal("HIJ", k1)
            assert_equal(789, k2)
            assert_equal('=', k3)
            assert_equal('+', k4)
            return ast

    grammar = '''
        start
            =
            {rule_positional | rule_keywords | rule_all} $
            ;

        rule_positional('ABC', 123, '=', '+')
            =
            'a'
            ;

        rule_keywords(k1=ABC, k3='=', k4='+', k2=123)
            =
            'b'
            ;

        rule_all('DEF', 456, '=', '+', k1=HIJ, k3='=', k4='+', k2=789)
            =
            'c'
            ;
        '''

    pretty = '''
        start
            =
            {rule_positional | rule_keywords | rule_all} $
            ;

        rule_positional(ABC, 123, '=', '+')
            =
            'a'
            ;

        rule_keywords(k1=ABC, k3='=', k4='+', k2=123)
            =
            'b'
            ;

        rule_all(DEF, 456, '=', '+', k1=HIJ, k3='=', k4='+', k2=789)
            =
            'c'
            ;
        '''

    model = genmodel('RuleArguments', grammar)
    self.assertEqual(trim(pretty), ustr(model))
    model = genmodel('RuleArguments', pretty)

    ast = model.parse("a b c")
    self.assertEqual(['a', 'b', 'c'], ast)

    semantics = TC36Semantics()
    ast = model.parse("a b c", semantics=semantics)
    self.assertEqual(['a', 'b', 'c'], ast)
    codegen(model)
def param_repr(p):
    if isinstance(p, (int, float)):
        return ustr(p)
    else:
        return urepr(p)
def render_fields(self, fields):
    lines = "\n".join(
        "# %s" % ustr(c) for c in self.node.comment.splitlines()
    )
    fields.update(lines=lines)
def __str__(self):
    return ustr(self.exp)
def __init__(self, rule):
    assert isinstance(rule, Rule), ustr(rule.name)
    super(RuleInclude, self).__init__(rule.exp)
    self.rule = rule
def __str__(self):
    return '%s+:%s' % (self.name, ustr(self.exp))
def __str__(self):
    exp = ustr(self.exp)
    if len(exp.splitlines()) > 1:
        return '(\n%s\n)' % indent(exp)
    else:
        return '(%s)' % trim(exp)
def __str__(self):
    sexp = ustr(self.exp)
    if len(sexp.splitlines()) <= 1:
        return '{%s}' % sexp
    else:
        return '{\n%s\n}' % indent(sexp)
def __str__(self):
    return '!' + ustr(self.exp)