Example #1
 def __init__(self, theme):
     self._handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
     self._builder = CodeBuilder()
     self._compiler = OMeta.makeGrammar(compile_grammar, {'builder': self._builder})
     self._helpers = {}
     self.template_counter = 1
     _ghostpy_['theme'] = theme
Example #2
 def __init__(self, theme, domain='', lookup_dir='', lookup_url=''):
     self._handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
     self._builder = CodeBuilder()
     self._compiler = OMeta.makeGrammar(compile_grammar, {'builder': self._builder})
     self._helpers = {}
     self.template_counter = 1
     _ghostpy_['base'] = domain
     _ghostpy_['lookup_dir'] = lookup_dir
     _ghostpy_['lookup_url'] = lookup_url
     _ghostpy_['theme'] = theme
Example #3
 def __init__(self, theme, domain='', lookup_dir='', lookup_url=''):
     self._handlebars = OMeta.makeGrammar(handlebars_grammar, {},
                                          'handlebars')
     self._builder = CodeBuilder()
     self._compiler = OMeta.makeGrammar(compile_grammar,
                                        {'builder': self._builder})
     self._helpers = {}
     self.template_counter = 1
     _ghostpy_['base'] = domain
     _ghostpy_['lookup_dir'] = lookup_dir
     _ghostpy_['lookup_url'] = lookup_url
     _ghostpy_['theme'] = theme
Example #4
class IRIGrammar(OMeta.makeGrammar(iriGrammar, globals())):
    iprivateRanges = [(0x0000e000, 0x0000f8ff),
                      (0x000f0000, 0x000ffffd),
                      (0x00100000, 0x0010fffd)]

    ucscharRanges  = [(0x000000a0, 0x0000d7ff),
                      (0x0000f900, 0x0000fdcf),
                      (0x0000fdf0, 0x0000ffef),
                      (0x00010000, 0x0001fffd),
                      (0x00020000, 0x0002fffd),
                      (0x00030000, 0x0003fffd),
                      (0x00040000, 0x0004fffd),
                      (0x00050000, 0x0005fffd),
                      (0x00060000, 0x0006fffd),
                      (0x00070000, 0x0007fffd),
                      (0x00080000, 0x0008fffd),
                      (0x00090000, 0x0009fffd),
                      (0x000a0000, 0x000afffd),
                      (0x000b0000, 0x000bfffd),
                      (0x000c0000, 0x000cfffd),
                      (0x000d0000, 0x000dfffd),
                      (0x000e0000, 0x000efffd)]

    def markURLStart(self):
        self.urlStart = self.input.position

    def markURLEnd(self):
        self.urlEnd = self.input.position
Example #5
    def test_subclassing(self):
        """
        A subclass of an OMeta subclass should be able to call rules on its
        parent, and access variables in its scope.
        """
        from pymeta.grammar import OMeta

        grammar1 = """
        dig ::= :x ?(a <= x <= b) => int(x)
        """
        TestGrammar1 = OMeta.makeGrammar(grammar1, {'a': '0', 'b': '9'})

        grammar2 = """
        num ::= (<num>:n <dig>:d => n * base + d
                | <dig>)
        """
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {'base': 10})
        g = TestGrammar2("314159")
        self.assertEqual(g.apply("num")[0], 314159)

        grammar3 = """
        dig ::= :x ?(a <= x <= b or c <= x <= d) => int(x, base)
        """
        TestGrammar3 = TestGrammar2.makeGrammar(grammar3, {
            'c': 'a',
            'd': 'f',
            'base': 16
        })
        g = TestGrammar3("abc123")
        self.assertEqual(g.apply("num")[0], 11256099)
Example #6
    def test_subclassing(self):
        """
        A subclass of an OMeta subclass should be able to call rules on its
        parent, and access variables in its scope.
        """
        from pymeta.grammar import OMeta

        grammar1 = """
        dig ::= :x ?(a <= x <= b) => int(x)
        """
        TestGrammar1 = OMeta.makeGrammar(grammar1, {'a':'0', 'b':'9'})

        grammar2 = """
        num ::= (<num>:n <dig>:d => n * base + d
                | <dig>)
        """
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {'base':10})
        g = TestGrammar2("314159")
        self.assertEqual(g.apply("num")[0], 314159)

        grammar3 = """
        dig ::= :x ?(a <= x <= b or c <= x <= d) => int(x, base)
        """
        TestGrammar3 = TestGrammar2.makeGrammar(grammar3, {'c':'a', 'd':'f', 'base':16})
        g = TestGrammar3("abc123")
        self.assertEqual(g.apply("num")[0], 11256099)
Example #7
def preprocessSLAXML(node):
    """
    Given that node is a DOM element containing spell-like ability descriptions, insert
    additional DOM nodes into the text where interesting properties are found.
    """
    node.ownerDocument.documentElement.setAttribute(u'xmlns:p', PROP)
    title = node.getElementsByTagName(u'b')[0]
    title.setAttribute(u'p:property', u'powerName')

    def addSpellNameRDFa(node):
        node.setAttribute(u'p:property', u'spellName')
        #

    spellNames = list( util.doNodes(node, lambda n: n.nodeName == 'i', addSpellNameRDFa) )
    assert len(spellNames) > 0

    globs = globals().copy()
    Preprocessor = OMeta.makeGrammar(preprocGrammar, globs, "Preprocessor")

    todo = node.childNodes[:]
    for n, cn in enumerate(todo):
        if cn.nodeName == 'p':
            todo[n+1:n+1] = cn.childNodes[:]
            continue
        if cn.nodeName == '#text':
            parsed = []
            globs['A'] = lambda *x: parsed.extend(x)
            Preprocessor(cn.data).apply('slaText')
            nodes = joinRaw(parsed)
            substituteSLAText(cn, nodes)

    return node
Example #8
def rdfaProcessSLAXML(node):
    """
    Given an SLA node that has been preprocessed, remove sep tags and freqStart
    tags, put in frequencies and spell wrappers.
    """
    globs = globals().copy()
    tree = NodeTree()
    tree.useNode(node)
    globs.update({'t':tree,
        'ww': lambda *x:None,
        # 'ww': debugWrite,
        })

    RDFaParser = OMeta.makeGrammar(rdfaGrammar, globs, 'RDFaParser')

    seq = flattenSLATree(tree.node)[1:]
    try:
        parser = RDFaParser(seq)
        parser.apply('sla')
    except ParseError:
        def propify(x):
            if hasattr(x, 'nodeName') and x.nodeName == 'span':
                if x.hasAttribute('p:property'):
                    return '<PROP {0}>'.format(x.getAttribute('p:property'))
            return x
        print map(propify, parser.input.data[:parser.input.position])
        print map(propify, parser.input.data[parser.input.position:])
        raise
    return tree.node
Example #9
    def test_constant_bits(self):
        Grammar = OMeta.makeGrammar(r"""
var = "var" spaces letter+:name ';' -> ['var', ''.join(name)]
""", {})
        g = Grammar("var myvar;")
        result,error = g.apply("var")
        assert result == ['var', 'myvar']
Example #10
def run_tests(grammar_iter, tests_iter, expecting=False):
    """ Creates an OMeta grammar from the given grammar iterable, and
        tries to parse each test in the given test iterable
    """
    grammar_string = "".join(grammar_iter)
    parser = OMeta.makeGrammar(grammar_string, {})

    results = []
    failures = []
    for test in tests_iter:
        if expecting:
            to_parse, production, expected = [x.strip() for x in test.split("|||")]
            expected_value = eval(expected)
        else:
            to_parse, production = [x.strip() for x in test.split("|||")]

        application = parser(to_parse)
        try:
            result_value = application.apply(production)
            if not expecting or result_value == expected_value:
                results.append(".")
            else:
                results.append("F")
                fail = "Test %s, %s failed" % (to_parse, production)
                ex = "Expected '%s', got '%s'" % (expected, result_value)
                failures.append((fail, ex))
        except Exception as e:
            results.append("F")
            fail = "Test %s, %s failed" % (to_parse, production)
            ex = "%s: %s" % (e.__class__.__name__, e)
            failures.append((fail, ex))

    return results, failures
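
A minimal sketch of how run_tests() above might be invoked. The grammar and test lines are illustrative (not from the original project) and follow the "input ||| production ||| expected" format that the function splits on:

grammar_lines = ["digit ::= :x ?('0' <= x <= '9') => int(x)\n"]
test_lines = ["7 ||| digit ||| 7"]
# expecting=True makes run_tests eval() the expected value and compare against it
results, failures = run_tests(grammar_lines, test_lines, expecting=True)
print("".join(results), failures)  # "." per passing test, "F" plus details per failure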
Example #11
    def test_spaces(self):
        Grammar = OMeta.makeGrammar(r"""
spaces_and_letters = spaces letter+:letters -> letters 
""", {})

        result, error = Grammar(" a").apply("spaces_and_letters")
        assert result == ['a']
Example #12
    def no_test_begin(self):
        Grammar = OMeta.makeGrammar(r"""
from_start = begin 'a' -> 'a'
at_end = 'b' end -> 'b'
any = (from_start | at_end)*
""", {})
        result, error = Grammar(" a").apply("any")
        assert result == []
Example #13
    def test_allow_comments(self):
        """
        Full line comments before a rule is allowed
        Make sure that the boot.py version supports comments
        """
        from pymeta.grammar import OMeta
        g = OMeta.makeGrammar("""
# comment for interp
interp = "Foo" 1 2 -> 3
            """,{})
        self.assertEqual(g(['F', 'o', 'o', 1, 2]).apply("interp")[0], 3)

        g = OMeta.makeGrammar("""
// comment for interp
interp = "Foo" 1 2 -> 3
            """,{})
        self.assertEqual(g(['F', 'o', 'o', 1, 2]).apply("interp")[0], 3)
Example #14
class Compiler:
    """A handlebars template compiler.
    
    The compiler is not threadsafe: you need one per thread because of the
    state in CodeBuilder.
    """

    _handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
    _builder = CodeBuilder()
    _compiler = OMeta.makeGrammar(compile_grammar, {'builder':_builder})

    def __init__(self):
        self._helpers = {}

    def compile(self, source):
        """Compile source to a ready to run template.
        
        :param source: The template to compile - should be a unicode string.
        :return: A template ready to run.
        """
        
        assert isinstance(source, unicode)
        tree = self._handlebars(source).apply('template')[0]
        # print source
        # print '-->'
        # print "T", tree
        code = self._compiler(tree).apply('compile')[0]
        # print code
        return code

    def register_helper(self, helper_name, helper_callback):
        """Register a block helper.

        :param helper_name: The name of the helper.
        :param helper_callback: A callback to call when the helper is used.
            This should accept two parameters - items (the context sub-value
            specified by the block rule in the template) and options (which has
            template logic in it such as the render callback to render the
            block content for a single item).
        :return: None
        """
        
        _pybars_['helpers'][helper_name]= helper_callback
        
        global_helpers[helper_name] = helper_callback
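
For context, a minimal usage sketch of a compiler like the one above; the template text and context are illustrative, and the import path assumes the pybars package this class belongs to:

from pybars import Compiler

compiler = Compiler()
template = compiler.compile(u'Hello {{name}}!')
print(template({'name': 'world'}))  # renders the compiled template -> 'Hello world!'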
Example #15
def parse(s):
    """
    Produce a sequence of UIState objects from a text representation of a
    seedo diagram
    """
    globs = globals().copy()
    global Parser
    if Parser is None:
        Parser = OMeta.makeGrammar(seedoGrammar, globs, "Parser")
    return Parser(s).apply('uiTests')
Example #16
 def test_super(self):
     """
     Rules can call the implementation in a superclass.
     """
     from pymeta.grammar import OMeta
     grammar1 = "expr = letter"
     TestGrammar1 = OMeta.makeGrammar(grammar1, {})
     grammar2 = "expr = super | digit"
     TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {})
     self.assertEqual(TestGrammar2("x").apply("expr")[0], "x")
     self.assertEqual(TestGrammar2("3").apply("expr")[0], "3")
Example #17
    def test_end(self):
        Grammar = OMeta.makeGrammar(r"""
at_end = 'b' end -> 'b|'
any = (at_end | anything)*
""", {})
        result, error = Grammar(" a").apply("any")
        assert result == [' ','a']
        result, error = Grammar("b a").apply("any")
        assert result == ['b',' ','a']
        result, error = Grammar(" ab").apply("any")
        assert result == [' ','a','b|']
Example #18
 def test_super(self):
     """
     Rules can call the implementation in a superclass.
     """
     from pymeta.grammar import OMeta
     grammar1 = "expr ::= <letter>"
     TestGrammar1 = OMeta.makeGrammar(grammar1, {})
     grammar2 = "expr ::= <super> | <digit>"
     TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {})
     self.assertEqual(TestGrammar2("x").apply("expr")[0], "x")
     self.assertEqual(TestGrammar2("3").apply("expr")[0], "3")
Example #19
    def test_read_ahead(self):
        Grammar = OMeta.makeGrammar(r"""
attribute = bool | valued
bool = (letter+):name ~('=') -> (''.join(name),True)
valued = (letter+):name '=' (letter+):value -> (''.join(name),''.join(value))
""", {})

        g = Grammar("a")
        result,error = g.apply("attribute")
        assert result == ('a',True)

        g = Grammar("a=b")
        result,error = g.apply("attribute")
        assert result == ('a','b')
Example #20
 def test_makeGrammar(self):
     #imported here to prevent OMetaGrammar from being constructed before
     #tests are run
     from pymeta.grammar import OMeta
     results = []
     grammar = """
     digit ::= :x ?('0' <= x <= '9') => int(x)
     num ::= (<num>:n <digit>:d !(results.append(True)) => n * 10 + d
            | <digit>)
     """
     TestGrammar = OMeta.makeGrammar(grammar, {'results':results})
     g = TestGrammar("314159")
     self.assertEqual(g.apply("num")[0], 314159)
     self.assertNotEqual(len(results), 0)
Example #21
 def test_makeGrammar(self):
     #imported here to prevent OMetaGrammar from being constructed before
     #tests are run
     from pymeta.grammar import OMeta
     results = []
     grammar = """
     digit ::= :x ?('0' <= x <= '9') => int(x)
     num ::= (<num>:n <digit>:d !(results.append(True)) => n * 10 + d
            | <digit>)
     """
     TestGrammar = OMeta.makeGrammar(grammar, {'results': results})
     g = TestGrammar("314159")
     self.assertEqual(g.apply("num"), 314159)
     self.assertNotEqual(len(results), 0)
Example #22
 def test_makeGrammar(self):
     #imported here to prevent OMetaGrammar from being constructed before
     #tests are run
     from pymeta.grammar import OMeta
     results = []
     grammar = dedent("""
     digit = :x ?('0' <= x <= '9') -> int(x)
     num = (num:n digit:d !(results.append(True)) -> n * 10 + d
            | digit)
     """)
     TestGrammar = OMeta.makeGrammar(grammar, {'results': results})
     g = TestGrammar("314159")
     self.assertEqual(g.apply("num")[0], 314159)
     self.assertNotEqual(len(results), 0)
Example #23
    def test_nested_productions(self):
        Grammar = OMeta.makeGrammar(r"""
attributes = spaces (attribute:a spaces -> a)+:as -> as
attribute = bool | valued
bool = (letter+):name ~('=') -> (''.join(name),True)
valued = (letter+):name '=' (letter+):value -> (''.join(name),''.join(value))
""", {})

        g = Grammar("a=b")
        result,error = g.apply("attributes")
        assert result == [('a','b')]

        g = Grammar("a=b c d=e")
        result,error = g.apply("attributes")
        assert result == [('a','b'),('c',True),('d','e')]
Example #24
    def test_optional(self):
        Grammar = OMeta.makeGrammar(r"""
word_comment = "/*" ' '? letter*:t ' '? "*/" -> ['comment', ''.join(t)]
""", {})
        result,error = Grammar("/* abc */").apply("word_comment")
        assert result == ['comment', 'abc']
        
        result,error = Grammar("/*abc*/").apply("word_comment")
        assert result == ['comment', 'abc']

        result,error = Grammar("/* */").apply("word_comment")
        assert result == ['comment', '']

        result,error = Grammar("/**/").apply("word_comment")
        assert result == ['comment', '']
Example #25
    def fails_test_empty(self):
#         Grammar = OMeta.makeGrammar(r"""
# stuff = letter+:letters -> ''.join(letters) | empty -> 'nao'
# """, {})

        # doesn't seem to work, try alternative
        Grammar = OMeta.makeGrammar(r"""
stuff = letters | nothing
letters = letter+:letters -> ''.join(letters)
nothing = empty -> 'nao'
""", {})

        result,error = Grammar("").apply("stuff")
        assert result == "nao"
        result,error = Grammar("abc").apply("stuff")
        assert result == "abc"
Example #26
    def test_makeGrammar(self):
        # imported here to prevent OMetaGrammar from being constructed before
        # tests are run
        from pymeta.grammar import OMeta

        results = []
        grammar = dedent(
            """
        digit = :x ?('0' <= x <= '9') -> int(x)
        num = (num:n digit:d !(results.append(True)) -> n * 10 + d
               | digit)
        """
        )
        TestGrammar = OMeta.makeGrammar(grammar, {"results": results})
        g = TestGrammar("314159")
        self.assertEqual(g.apply("num")[0], 314159)
        self.assertNotEqual(len(results), 0)
Example #27
    def test_tokens(self):
        Grammar = OMeta.makeGrammar(r"""
a = token('a') -> ('t','a')
b = token('b') -> ('t','b')
t = a | b
""",{})
        result, error = Grammar(" a").apply("t")
        assert result == ('t','a')
        result, error = Grammar(" b").apply("t")
        assert result == ('t','b')
        # result, error = Grammar("\0xa0 a").apply("t")
        # assert result == ('t','a')
        result, error = Grammar("\t a").apply("t")
        assert result == ('t','a')
        result, error = Grammar("\n a").apply("t")
        assert result == ('t','a')
        result, error = Grammar("a\n").apply("t")
        assert result == ('t','a')
Example #28
    def test_subclassing(self):
        """
        A subclass of an OMeta subclass should be able to call rules on its
        parent.
        """
        from pymeta.grammar import OMeta

        grammar1 = """
        dig ::= :x ?('0' <= x <= '9') => int(x)
        """
        TestGrammar1 = OMeta.makeGrammar(grammar1, {})

        grammar2 = """
        num ::= (<num>:n <dig>:d => n * 10 + d
                | <dig>)
        """
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {})
        g = TestGrammar2("314159")
        self.assertEqual(g.apply("num"), 314159)
Example #30
    def test_subclassing(self):
        """
        A subclass of an OMeta subclass should be able to call rules on its
        parent.
        """
        from pymeta.grammar import OMeta

        grammar1 = dedent("""
        dig = :x ?('0' <= x <= '9') -> int(x)
        """)
        TestGrammar1 = OMeta.makeGrammar(grammar1, {})

        grammar2 = dedent("""
        num = (num:n dig:d -> n * 10 + d
                | dig)
        """)
        TestGrammar2 = TestGrammar1.makeGrammar(grammar2, {})
        g = TestGrammar2("314159")
        self.assertEqual(g.apply("num")[0], 314159)
Example #32
    def no_test_uc(self):
        import sys
        # sys.setdefaultencoding('utf-8')
        import unicodedata
        assert unicodedata.category(u"a") == "Ll"
        
        Grammar = OMeta.makeGrammar(r"""
letters = UnicodeLetter*
number = UnicodeDigit*

UnicodeLetter = uc('L') | uc('Nl') | uc('Ll') | uc('Lu')
UnicodeCombiningMark = uc('Mn') | uc('Mc')
UnicodeDigit = uc('Nd')
UnicodeConnectorPunctuation = uc('Pc')
""",{})
        result, error = Grammar(u"a").apply("UnicodeLetter")
        assert result == u'a'
        result, error = Grammar(u"\u0393").apply("UnicodeLetter")
        assert result == u'\u0393'
        result, error = Grammar(u"\u03931").apply("letters")
        assert result == [u'\u0393']
        result, error = Grammar(u"123a").apply("number")
        assert result == [u'1', u'2', u'3']
Example #33
class Compiler:
    """A handlebars template compiler.

    The compiler is not threadsafe: you need one per thread because of the
    state in CodeBuilder.
    """

    _handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
    _builder = CodeBuilder()
    _compiler = OMeta.makeGrammar(compile_grammar, {'builder': _builder})

    def __init__(self):
        self._helpers = {}
        self.template_counter = 1

    def _extract_word(self, source, position):
        boundry = re.search(r'{{|{|\s|$', source[:position][::-1])
        start_offset = boundry.end() if boundry.group(0).startswith(
            '{') else boundry.start()

        boundry = re.search(r'}}|}|\s|$', source[position:])
        end_offset = boundry.end() if boundry.group(0).startswith(
            '}') else boundry.start()

        return source[position - start_offset:position + end_offset]

    def _generate_code(self, source):
        """
        Common compilation code shared between precompile() and compile()

        :param source:
            The template source as a unicode string

        :return:
            A tuple of (function, source_code)
        """

        if not isinstance(source, str_class):
            raise PybarsError("Template source must be a unicode string")

        tree, (position, _) = self._handlebars(source).apply('template')

        self.clean_whitespace(tree)

        if debug:
            print('\nAST')
            print('---')
            print(tree)
            print('')

        if position < len(source):
            line_num = source.count('\n') + 1
            beginning_of_line = source.rfind('\n', 0, position)
            if beginning_of_line == -1:
                char_num = position
            else:
                char_num = position - beginning_of_line
            word = self._extract_word(source, position)
            raise PybarsError("Error at character %s of line %s near %s" %
                              (char_num, line_num, word))

        # Ensure the builder is in a clean state - kinda gross
        self._compiler.globals['builder']._reset()

        output = self._compiler(tree).apply('compile')[0]
        return output

    def precompile(self, source):
        """
        Generates python source code that can be saved to a file for caching

        :param source:
            The template to generate source for - should be a unicode string

        :return:
            Python code as a unicode string
        """

        return self._generate_code(source).full_code

    def compile(self, source, path=None):
        """Compile source to a ready to run template.

        :param source:
            The template to compile - should be a unicode string

        :return:
            A template function ready to execute
        """

        container = self._generate_code(source)

        def make_module_name(name, suffix=None):
            output = 'pybars._templates.%s' % name
            if suffix:
                output += '_%s' % suffix
            return output

        if not path:
            path = '_template'
            generate_name = True
        else:
            path = path.replace('\\', '/')
            path = path.replace('/', '_')
            mod_name = make_module_name(path)
            generate_name = mod_name in sys.modules

        if generate_name:
            mod_name = make_module_name(path, self.template_counter)
            while mod_name in sys.modules:
                self.template_counter += 1
                mod_name = make_module_name(path, self.template_counter)

        mod = ModuleType(mod_name)
        filename = '%s.py' % mod_name.replace('pybars.', '').replace('.', '/')
        exec(compile(container.full_code, filename, 'exec', dont_inherit=True),
             mod.__dict__)
        sys.modules[mod_name] = mod
        linecache.getlines(filename, mod.__dict__)

        return mod.__dict__[container.name]

    def clean_whitespace(self, tree):
        """
        Cleans up whitespace around block open and close tags if they are the
        only thing on the line

        :param tree:
            The AST - will be modified in place
        """

        pointer = 0
        end = len(tree)

        while pointer < end:
            piece = tree[pointer]
            if piece[0] == 'block':
                child_tree = piece[3]

                # Look at open tag, if the only other thing on the line is whitespace
                # then delete it so we don't introduce extra newlines to the output
                open_pre_whitespace = False
                open_pre_content = True
                if pointer > 1 and tree[pointer - 1][0] == 'whitespace' and (
                        tree[pointer - 2][0] == 'newline'
                        or tree[pointer - 2] == 'template'):
                    open_pre_whitespace = True
                    open_pre_content = False
                elif pointer > 0 and (tree[pointer - 1][0] == 'newline'
                                      or tree[pointer - 1] == 'template'):
                    open_pre_content = False

                open_post_whitespace = False
                open_post_content = True
                child_len = len(child_tree)
                if child_len > 2 and child_tree[1][
                        0] == 'whitespace' and child_tree[2][0] == 'newline':
                    open_post_whitespace = True
                    open_post_content = False
                elif child_len > 1 and child_tree[1][0] == 'newline':
                    open_post_content = False

                if not open_pre_content and not open_post_content:
                    if open_pre_whitespace:
                        tree.pop(pointer - 1)
                        pointer -= 1
                        end -= 1
                    if open_post_whitespace:
                        child_tree.pop(1)
                    child_tree.pop(1)  # trailing newline

                # Do the same thing, but for the close tag
                close_pre_whitespace = False
                close_pre_content = True
                child_len = len(child_tree)
                if child_len > 2 and child_tree[
                        child_len - 1][0] == 'whitespace' and child_tree[
                            child_len - 2][0] == 'newline':
                    close_pre_whitespace = True
                    close_pre_content = False
                elif child_len > 1 and child_tree[child_len -
                                                  1][0] == 'newline':
                    close_pre_content = False

                close_post_whitespace = False
                close_post_content = True
                tree_len = len(tree)
                if tree_len > pointer + 2 and tree[
                        pointer +
                        1][0] == 'whitespace' and tree[pointer +
                                                       2][0] == 'newline':
                    close_post_whitespace = True
                    close_post_content = False
                elif tree_len == pointer + 2 and tree[pointer +
                                                      1][0] == 'whitespace':
                    close_post_whitespace = True
                    close_post_content = False
                elif tree_len > pointer + 1 and tree[pointer +
                                                     1][0] == 'newline':
                    close_post_content = False
                elif tree_len == pointer + 1:
                    close_post_content = False

                if not close_pre_content and not close_post_content:
                    if close_pre_whitespace:
                        child_tree.pop()
                    child_tree.pop()  # preceding newline
                    if close_post_whitespace:
                        tree.pop(pointer + 1)
                        end -= 1

                self.clean_whitespace(child_tree)

            pointer += 1
Example #34
next ::= <token "NEXT:"> <address>:nextparticle ','*								=> {'NEXT':nextparticle}

address ::= <integer>:memaddress										=> memaddress
         | <token "NULL">											=> None

integer ::= '-' <dig>+:ds											=> -1*int(''.join(ds))
          | <dig>+:ds												=> int(''.join(ds))
      
dig ::= '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
          
decimal ::= <integer>:whole '.' <dig>+:frac									=> float(str(whole)+'.'+''.join(frac))
          | <integer>
"""

# Make a class which reads the dump grammar
grammar = OMeta.makeGrammar(dump_grammar, globals())

# Exit if we've been given no file to check
if len(sys.argv) < 2:
	print "Usage: memchecker.py DUMP_FILE"
	sys.exit()
	
# Open the given file
infile = open(sys.argv[1], 'r')

inlines = ''.join(infile.readlines())

infile.close()

# Instantiate the dump grammar with the input
print "Making grammar"
Example #35
#prec18 :i ::= <compare i>:c												=> c
## Identity tests
##prec19 :i ::= <>:=>
## Membership tests
##prec20 :i ::= <>:=>
## Boolean NOT
#prec21 :i ::= <not i>:n													=> n
## Boolean AND
#prec22 :i ::= <and i>:a													=> a
## Boolean OR
#prec23 :i ::= <or i>:o													=> o
## Lambda
#prec24 :i ::= <lambda i>:l												=> l
"""

g = OMeta.makeGrammar(strip_comments(gram), {'match_args':match_args, 'from_flag':from_flag})

if __name__ == '__main__':
	try:
		mode = "none"
		if sys.argv[2].strip() == "list":
			mode = "list"
		elif sys.argv[2].strip() == "finderror":
			mode = "finderror"
		if not mode is "none":
			ins = open(sys.argv[1], 'r')
			parsefiles = []

			for line in ins.readlines():
				parsefiles.append(line.strip())
			ins.close()
Example #36
class RegexGrammar(OMeta.makeGrammar(regexGrammar, globals())):
    pass
Example #37
class Translator(OMeta.makeGrammar(pyva_translator, {'p': p, 'json': json})):
    op_map = {
        'not': '!',
    }
    binop_map = {
        'or': '||',
        'and': '&&',
        'is': '===',
        'is not': '!==',
    }
    name_map = {
        'None': 'null',
        'True': 'true',
        'False': 'false',
        'self': 'this',
        'int': 'parseInt',
        'float': 'parseFloat',
        'bool': '!!',
        'tuple': 'list',
        'unicode': 'str',
    }

    #class variable indicating whether comments are allowed or not
    allowcomments = False

    def __init__(self, *args, **kwargs):
        super(Translator, self).__init__(*args, **kwargs)
        self.indentation = 0
        self.local_vars = set()
        self.nonlocal_vars = set()
        self.global_vars = set()
        self.var_stack = []
        self.temp_var_id = 0

    @classmethod
    def parse(cls, *args, **kwargs):
        if 'debug_flag' in kwargs:
            #Set allowcomments to the debug flag
            cls.allowcomments = bool(kwargs['debug_flag'])
            #Remove the debug flag keyword
            del kwargs['debug_flag']
        else:
            cls.allowcomments = False
        return super(Translator, cls).parse(*args, **kwargs)

    def get_name(self, name):
        if name == 'self' and name not in self.global_vars:
            return name
        return self.name_map.get(name, name)

    def make_temp_var(self, name, prefix='_$tmp'):
        self.temp_var_id += 1
        return '%s%s_%s' % (prefix, self.temp_var_id, name)

    def indent(self):
        self.indentation += 1
        return self.indentation

    def dedent(self):
        self.indentation -= 1

    def is_pure_var_name(self, var):
        return '.' not in var and '[' not in var

    def is_list_var_name(self, var):
        return var[0] == '[' and var[-1] == ']'

    def __register_var(self, var):
        if self.is_pure_var_name(var) and \
          var not in self.global_vars and \
          var not in self.nonlocal_vars:
            self.local_vars.add(var)

    def __get_var_list(self, var_list_str):
        var_names = var_list_str[1:-1].split(',')
        var_names = [var_name.strip() for var_name in var_names]
        return var_names

    def make_comment(self, c):
        if self.allowcomments:
            return '//%s' % c
        else:
            return None

    def make_stmts(self, ss):
        """
		Filter out None statements - these are comments that have been filtered out
		"""
        return [s for s in ss if s is not None]

    def make_eq(self, var, val):
        indent = '  ' * self.indentation
        if self.is_list_var_name(var):
            var_names = self.__get_var_list(var)
            var_str = '_$rapyd_tuple$_ = %s' % val
            for i in xrange(len(var_names)):
                var_str += ';\n%s%s = %s' % (indent, var_names[i],
                                             '_$rapyd_tuple$_[' + str(i) + ']')
        else:
            var_str = '%s = %s' % (var, val)

        return var_str

    def register_var(self, var):
        if self.is_list_var_name(var):
            var_names = self.__get_var_list(var)
            self.__register_var('_$rapyd_tuple$_')
            for i in xrange(len(var_names)):
                self.__register_var(var_names[i])
        else:
            self.__register_var(var)

    def register_nonlocals(self, vars):
        for var in vars:
            if self.is_pure_var_name(var) and var not in self.global_vars:
                self.nonlocal_vars.add(var)
                self.local_vars -= set([var])

    def register_globals(self, vars):
        self.global_vars.update(
            [var for var in vars if self.is_pure_var_name(var)])
        self.local_vars -= self.global_vars
        self.nonlocal_vars -= self.global_vars

    def push_vars(self):
        self.var_stack.append(
            (self.local_vars, self.nonlocal_vars, self.global_vars))
        self.local_vars = set()
        self.nonlocal_vars = set()
        self.global_vars = set()

    def translate_cmp(self, x, op, y):
        if op == 'not in':
            #special case
            return '!(%s in %s)' % (x, y)
        else:
            return '(%s %s %s)' % (x, self.binop_map.get(op, op), y)

    def make_chained_cmp(self, l, r):
        """
		build a chained comparison - this is not intended to handle
		single comparisons because it adds unnecessary extra variables
		"""
        comps = iter(r)
        comp = comps.next()

        final_comp = self.translate_cmp(l, comp[0], comp[1])
        prev_var = comp[1]

        for comp in comps:
            final_comp = '%s && %s' % \
             (final_comp, self.translate_cmp(prev_var, comp[0], comp[1]))
            prev_var = comp[1]
        return '(%s)' % final_comp

    def pop_vars(self):
        self.local_vars, self.nonlocal_vars, self.global_vars = self.var_stack.pop(
        )

    def make_block(self, stmts, indentation):
        indentstr = '  ' * indentation
        line_list = []
        for stmt in stmts:
            if stmt.startswith('//'):
                line_list.append(stmt)
            else:
                line_list.append('%s%s' % (indentstr, stmt))
        return '{\n%s\n%s}' % ('\n'.join(line_list), '  ' * (indentation - 1))

    def make_func_block(self, stmts, indentation):
        if self.local_vars:
            vars_str = ', '.join(sorted(self.local_vars))
            var_stmt = ['var %s;' % vars_str]
        else:
            var_stmt = []
        return self.make_block(var_stmt + stmts, indentation)

    def make_dict(self, items, indentation):
        indentstr = '  ' * indentation
        sep = ',\n%s' % indentstr
        return '{\n%s%s\n%s}' % (indentstr, sep.join(items), '  ' *
                                 (indentation - 1))

    def comments_str(self, raw_comments):
        comments = []
        if self.allowcomments:
            for comment in raw_comments:
                if comment and comment[0] == 'comment':
                    comments.append('//%s' % comment[1])

        if comments:
            return '\n%s\n%s' % ('\n'.join(comments), '  ' * self.indentation)
        else:
            return ''

    def make_if(self, cond, block, elifexprs, elseblock):
        expr = ['if (%s) %s' % (cond, block)]
        for elifexpr in elifexprs:
            comments = self.comments_str(elifexpr[0])
            expr.append('%selse if (%s) %s' %
                        (comments, elifexpr[1], elifexpr[2]))
        if elseblock and elseblock[1]:
            comments = self.comments_str(elseblock[0])
            expr.append('%selse %s' % (comments, elseblock[1]))
        return ' '.join(expr)

    def make_try(self, body, catch, fin):
        expr = ['try %s' % body]
        if catch is not None:
            expr.append('catch(%s) %s' % (catch[1], catch[2]))
        if fin is not None:
            expr.append('finally %s' % fin[1])
        return ' '.join(expr)

    def make_for(self, var, data, body):
        indentstr = '  ' * self.indentation
        datavar = self.make_temp_var('data')
        lenvar = self.make_temp_var('len')
        index = self.make_temp_var('index')
        unpack_str = ''
        if self.is_list_var_name(var):
            RAPYD_PACKED_TUPLE = '_$rapyd$_tuple'
            var_list = self.__get_var_list(var)
            unpack_str = ''
            for i in xrange(len(var_list)):
                unpack_str += '%s%s = %s[%d];\n' % \
                (indentstr + '  ', var_list[i], RAPYD_PACKED_TUPLE, i)
            var = RAPYD_PACKED_TUPLE
        init = 'var %s = _$rapyd$_iter(%s);\n%svar %s = %s.length;\n%s' % (
            datavar, data, indentstr, lenvar, datavar, indentstr)
        body = body.replace(
            '{', '{\n%s%s = %s[%s];\n%s' %
            (indentstr + '  ', var, datavar, index, unpack_str), 1)
        return '%sfor (var %s = 0; %s < %s; %s++) %s' % (init, index, index,
                                                         lenvar, index, body)

    def temp_var_or_literal(self, name, var, init):
        """
		Returns either the literal if it's a literal or a temporary variable
		storing the non-literal in addition to registering the temporary with
		init.
		"""
        if var[0]:
            # Literal
            return var[1]
        temp = self.make_temp_var(name)
        init.append('%s = %s' % (temp, var[1]))
        return temp

    def make_for_range(self, var, for_range, body):
        # for_range is a list of tuples (bool:literal, str:js_code)
        indentstr = '  ' * self.indentation
        stepstr = '%s++' % var
        init = []
        if len(for_range) == 1:
            start = 0
            end = self.temp_var_or_literal('end', for_range[0], init)
        else:
            start = for_range[0][1]
            end = self.temp_var_or_literal('end', for_range[1], init)
            if len(for_range) == 3:
                step = self.temp_var_or_literal('step', for_range[2], init)
                stepstr = '%s += %s' % (var, step)

        initstr = ''
        if init:
            initstr = 'var %s;\n%s' % (', '.join(init), indentstr)

        return '%sfor (%s = %s; %s < %s; %s) %s' % (initstr, var, start, var,
                                                    end, stepstr, body)

    def make_for_reversed_range(self, var, for_range, body):
        indentstr = '  ' * self.indentation
        if len(for_range) == 1:
            return '%s = %s;\n%swhile (%s--) %s' % (var, for_range[0][1],
                                                    indentstr, var, body)

        init = []
        start = for_range[1][1]
        end = self.temp_var_or_literal('end', for_range[0], init)
        if len(for_range) == 3:
            step = self.temp_var_or_literal('step', for_range[2], init)
            stepstr = '%s -= %s' % (var, step)
        else:
            stepstr = '%s--' % var

        initstr = ''
        if init:
            initstr = 'var %s;\n%s' % (', '.join(init), indentstr)

        return '%sfor (%s = (%s) - 1; %s >= %s; %s) %s' % (
            initstr, var, start, var, end, stepstr, body)

    def make_func(self, name, args, body):
        if name:
            name = self.get_name(name[1])
            self.register_var(name)
            func = '%s = function' % name
            body += ';'
        else:
            func = 'function'
        if args and args[0] == self.get_name('self'):
            args = args[1:]
        return '%s(%s) %s' % (func, ', '.join(args), body)
Example #38
# Recurse through boundless loops
while ::= <anything>:a ?(a.__class__ == While) => While(apply(a.test), apply(a.body), apply(a.else_))

# Recurse through with?
with ::= <anything>:a ?(a.__class__ == With) => With(apply(a.expr), apply(a.vars), apply(a.body))

# Recurse through Yields
yield ::= <anything>:a ?(a.__class__ == Yield) => Yield(apply(a.value))

"""

# Now we embed the transformations in every AST node, so that they can
# apply them recursively to their children
#from python_rewriter.base import grammar
import sys
transforms = OMeta.makeGrammar(strip_comments(tree_transform), globals())


# Patch the grammar for recursion
def ins(self, val):
    """This is a very dangerous function! We monkey-patch PyMeta grammars with
	this so that we can insert an arbitrary value as the next input. This allows
	us to recurse without having to	instantiate another matcher (we effectively
	use the existing input as a stack). Beware of leaving cruft behind on the
	input!"""
    # Insert the value
    self.input.data.insert(self.input.position, val)
    # Throw away any cached input
    self.input.tl = None
    self.input.memo = {}
    # Ensure success, if needed
Example #39
File: __init__.py Project: jeffd/soft-dev
from pymeta.grammar import OMeta
from pymeta.runtime import ParseError

from translate import convert_lines

__all__ = ['validate']

# The file which will contain the bnf grammar for the game responses
BNF_FILE = path.join(path.dirname(path.abspath(__file__)), "./bnf_10.txt")
# The file which contains some base grammar definitions for JSON
JSON_GRAMMAR_FILE = path.join(path.dirname(path.abspath(__file__)), "./json_base.grm")

# The name of the production which all messages extend from in the grammar
TOP_PRODUCTION = "msg"

# Convert the BNF into pymeta, then add the JSON base to it
_grammar = "\n\n".join(convert_lines(open(BNF_FILE)))
_grammar += "\n\n" + "".join(open(JSON_GRAMMAR_FILE))

# Make the parser from it
_parser  = OMeta.makeGrammar(_grammar, {})

def validate(response, production=TOP_PRODUCTION):
    ''' Tries to validate the given response with the loaded grammar.
        Returns true if the response is valid in the grammar
    '''
    application = _parser(response)
    application.apply(production)
    return True
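
A hypothetical call to validate() above; the response text is purely illustrative, since a real message has to match the BNF loaded from bnf_10.txt:

try:
    validate('{"type": "ok"}')  # returns True when the response parses
except ParseError:
    print("response does not match the grammar")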
Example #40
File: parser.py Project: mjumbewu/pymeta

def Tuple(args):
    return _Term(Tag(".tuple."), args)

def Bag(args):
    return _Term(Tag(".bag."), args)

def LabelledBag(f, arg):
    return _Term(f, [arg])

def Attr(k, v):
    return _Term(Tag(".attr."), [k, v])


BaseTermLParser = OMeta.makeGrammar(termLGrammar, globals(), "TermLParser")

class TermLParser(BaseTermLParser, CommonParser):
    pass
TermLParser.globals.update(CommonParser.globals)

def _parseTerm(termString):
    """
    Parser frontend for term strings.
    """
    p = TermLParser(termString)
    result, error = p.apply("term")
    try:
        p.input.head()
    except EOFError:
        pass
Example #41
File: css3.py Project: thepian/themaestro
"""
A grammar for parsing a tiny CSS-like language, plus a transformer for it.
"""
from pymeta.grammar import OMeta
from itertools import chain

tinyCSSGrammar = """

name ::= <letterOrDigit>+:ls => ''.join(ls)

tag ::= ('<' <spaces> <name>:n <spaces> <attribute>*:attrs '>'
         <html>:c
         '<' '/' <token n> <spaces> '>'
             => [n.lower(), dict(attrs), c])

html ::= (<text> | <tag>)*

text ::= (~('<') <anything>)+:t => ''.join(t)

attribute ::= <spaces> <name>:k <token '='> <quotedString>:v => (k, v)

quotedString ::= (('"' | '\''):q (~<exactly q> <anything>)*:xs <exactly q>
                     => ''.join(xs))

"""
TinyCSS = OMeta.makeGrammar(tinyCSSGrammar, globals(), name="TinyCSS")

def compile(source):
    pass #return Translator.parse_source(TinyCSS.parse_source(source))
Example #42
none_list :a ::=  => make_list(a)
"""

# These are the objects which will be available to the matcher
import sys
args = globals()
args['constants'] = [str, int, float, complex]
args['import_match'] = import_match
args['tuple_args'] = tuple_args
args['is_del'] = is_del
args['pick_quotes'] = pick_quotes
args['make_list'] = make_list
args['sys'] = sys

# grammar is the class, instances of which can match using grammar_def
grammar = OM.makeGrammar(grammar_def, args)

# Patch the grammar for recursion
def ins(self, val):
	"""This is a very dangerous function! We monkey-patch PyMeta grammars with
	this so that we can insert an arbitrary value as the next input. This allows
	us to recurse without having to	instantiate another matcher (we effectively
	use the existing input as a stack). Beware of leaving cruft behind on the
	input!"""
	# Insert the value
	self.input.data.insert(self.input.position, val)
	# Throw away any cached input
	self.input.tl = None
	self.input.memo = {}
	# Ensure success, if needed
	return True
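
Presumably ins() is then attached to the grammar class produced by makeGrammar, along the lines of the following (an assumption based on the docstring; the attachment itself is not shown in this snippet):

grammar.ins = ins  # assumed attachment point for the monkey-patch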
Example #43
  // Set the current namespace
  current_namespace = (struct namespace *)vtable_toplevel_namespace;
}}}END_EMBEDDED_C

EMBEDDED_C{{{
  return 0;
  }
}}}END_EMBEDDED_C

"""

params = globals()
for key in locals().keys():
	params[key] = locals()[key]

grammar = OMeta.makeGrammar(strip_comments(grammar_def), params)

if __name__ == '__main__':
	if len(sys.argv) < 2:
		print "Usage: iddish_compiler.py input.id [output.c]"
		sys.exit()

	in_name = sys.argv[1]
	if len(sys.argv) > 2:
		out_name = sys.argv[2]
	else:
		out_name = in_name.rsplit('.', 1)[0]+'.c'
	
	in_file = open(in_name, 'r')
	in_lines = ''.join([l for l in in_file.readlines()])
	in_file.close()
Example #44
    return _Term(Tag(".tuple."), args)


def Bag(args):
    return _Term(Tag(".bag."), args)


def LabelledBag(f, arg):
    return _Term(f, [arg])


def Attr(k, v):
    return _Term(Tag(".attr."), [k, v])


BaseTermLParser = OMeta.makeGrammar(termLGrammar, globals(), "TermLParser")


class TermLParser(BaseTermLParser, CommonParser):
    pass


TermLParser.globals.update(CommonParser.globals)


def _parseTerm(termString):
    """
    Parser frontend for term strings.
    """
    p = TermLParser(termString)
    result, error = p.apply("term")
Example #45
class Grammar(OMeta.makeGrammar(pyva_grammar, {'p': p})):
    keywords = set((
        'and',
        'as',
        'break',
        'case',
        'catch',
        'class',
        'continue',
        'def',
        'default',
        'del',
        'delete',
        'do',
        'elif',
        'else',
        'except',
        'finally',
        'for',
        'function',
        'if',
        'in',
        'instanceof',
        'new',
        'not',
        'or',
        'pass',
        'raise',
        'return',
        'switch',
        'throw',
        'til',
        'to',
        'try',
        'var',
        'void',
        'while',
        'with',
        'yield',
    ))
    hex_digits = '0123456789abcdef'

    def __init__(self, *args, **kwargs):
        super(Grammar, self).__init__(*args, **kwargs)
        self.parenthesis = 0
        self.parenthesis_stack = []
        self.indent_stack = [0]

    def enter_paren(self):
        self.parenthesis += 1

    def leave_paren(self):
        self.parenthesis -= 1

    def enter_deflambda(self, indent):
        self.indent_stack.append(indent)
        self.parenthesis_stack.append(self.parenthesis)
        self.parenthesis = 0

    def leave_deflambda(self):
        self.indent_stack.pop()
        self.parenthesis = self.parenthesis_stack.pop()

    def get_indent(self):
        start = self.input.position
        for index in reversed(range(self.input.position)):
            char = self.input.data[index]
            if char == '\n':
                return start - (index + 1)
            elif char != ' ':
                start = index
        return 0

    def dedent(self):
        # A dedent comes after a '\n'. Put it back, so the outer line
        # rule can handle the '\n'
        self.indent_stack.pop()
        input = self.input.prev()
        if input.head()[0] == '\n':
            self.input = input

    def is_keyword(self, keyword):
        return keyword in self.keywords
Example #46
File: calc.py Project: mithrandi/eridanus
class CalcGrammar(OMeta.makeGrammar(calcGrammar, globals())):
    pass
Example #47
# If we've reached a '}meta' then stop matching, otherwise keep going
annotation_contents ::= <token '}meta'>						=> ''
                      | <anything> <annotation_contents>	=> ''

# A statement is a series of annotations or anything else
statement ::= <annotation>+									=> ''
            | <anything>+:a									=> ''.join(a)

# A program is a series of statements
program ::= <statement>+:a									=> ''.join(a)

"""

# Now we embed the transformations in every AST node, so that they can
# apply them recursively to their children
finder = OMeta.makeGrammar(strip_comments(annotation_finder), globals())


def strip_annotations(path_or_text):
    """This performs the translation from annotated Python to normal
	Python. It takes in annotated Python code (assuming the string to be
	a file path, falling back to treating it as raw code if it is not a
	valid path) and emits Python code."""
    # See if the given string is a valid path
    if os.path.exists(path_or_text):
        # If so then open it and read the file contents into in_text
        infile = open(path_or_text, 'r')
        in_text = '\n'.join([line for line in infile.readlines()])
        infile.close()
    # Otherwise take the string contents to be in_text
    else:
Example #48
File: common.py Project: set-soft/pymeta3
def makeHex(sign, hs):
    return int((sign or '') + ''.join(hs), 16)


def makeOctal(sign, ds):
    return int((sign or '') + '0' + ''.join(ds), 8)


def isDigit(x):
    return x in string.digits


def isOctDigit(x):
    return x in string.octdigits


def isHexDigit(x):
    return x in string.hexdigits


def contains(container, value):
    return value in container


def cons(first, rest):
    return [first] + rest


CommonParser = OMeta.makeGrammar(baseGrammar, globals(), "CommonParser")
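
Quick illustrations of the helper functions above; the values follow directly from the definitions:

makeHex(None, ['f', 'f'])   # int('ff', 16) -> 255
makeOctal('-', ['1', '0'])  # int('-010', 8) -> -8
cons('a', ['b', 'c'])       # -> ['a', 'b', 'c']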
Example #49
File: _compiler.py Project: up9inc/pybars4
class Compiler:
    """A handlebars template compiler.

    The compiler is not threadsafe: you need one per thread because of the
    state in CodeBuilder.
    """

    _handlebars = OMeta.makeGrammar(handlebars_grammar, {}, 'handlebars')
    _builder = CodeBuilder()
    _compiler = OMeta.makeGrammar(compile_grammar, {'builder': _builder})

    def __init__(self):
        self._helpers = {}
        self.template_counter = 1

    def _extract_word(self, source, position):
        """
        Extracts the word that falls at or around a specific position

        :param source:
            The template source as a unicode string
        :param position:
            The position where the word falls

        :return:
            The word
        """
        boundry = re.search(r'{{|{|\s|$', source[:position][::-1])
        start_offset = boundry.end() if boundry.group(0).startswith(
            '{') else boundry.start()

        boundry = re.search(r'}}|}|\s|$', source[position:])
        end_offset = boundry.end() if boundry.group(0).startswith(
            '}') else boundry.start()

        return source[position - start_offset:position + end_offset]

    def _generate_code(self, source):
        """
        Common compilation code shared between precompile() and compile()

        :param source:
            The template source as a unicode string

        :return:
            A tuple of (function, source_code)
        """

        if not isinstance(source, str_class):
            raise PybarsError("Template source must be a unicode string")

        source = self.whitespace_control(source)

        tree, (position, _) = self._handlebars(source).apply('template')

        if debug:
            print('\nAST')
            print('---')
            print(tree)
            print('')

        if position < len(source):
            line_num = source.count('\n') + 1
            beginning_of_line = source.rfind('\n', 0, position)
            if beginning_of_line == -1:
                char_num = position
            else:
                char_num = position - beginning_of_line
            word = self._extract_word(source, position)
            raise PybarsError("Error at character %s of line %s near %s" %
                              (char_num, line_num, word))

        # Ensure the builder is in a clean state - kinda gross
        self._compiler.globals['builder']._reset()

        output = self._compiler(tree).apply('compile')[0]
        return output

    def whitespace_control(self, source):
        """
        Preprocess source to handle whitespace control and remove extra block
        whitespaces.

        :param source:
            The template source as a unicode string
        :return:
            The processed template source as a unicode string
        """
        cleanup_sub = re.compile(
            # Clean-up whitespace control marks and spaces between block tags
            r'(?<={{)~|~(?=}})|(?<=}})[ \t]+(?={{)').sub

        return re.sub(
            # Whitespace control using "~" mark
            r'~}}\s*|\s*{{~|'

            # Whitespace around block tags that are alone on a line
            r'(?<=\n)([ \t]*{{(#[^{}]+|/[^{}]+|![^{}]+|else|else if [^{}]+)}}[ \t]*)+\r?\n|'

            # Whitespace around a block tag alone on the first line
            r'^([ \t]*{{(#[^{}]+|![^{}]+)}}[ \t]*)+\r?\n|'

            # Whitespace around a block tag alone on the last line
            r'\r?\n([ \t]*{{(/[^{}]+|![^{}]+)}}[ \t]*)+$',
            lambda match: cleanup_sub('',
                                      match.group(0).strip()),
            source)

    def precompile(self, source):
        """
        Generates python source code that can be saved to a file for caching

        :param source:
            The template to generate source for - should be a unicode string

        :return:
            Python code as a unicode string
        """

        return self._generate_code(source).full_code

    def compile(self, source, path=None):
        """Compile source to a ready to run template.

        :param source:
            The template to compile - should be a unicode string

        :return:
            A template function ready to execute
        """

        container = self._generate_code(source)

        def make_module_name(name, suffix=None):
            output = 'pybars._templates.%s' % name
            if suffix:
                output += '_%s' % suffix
            return output

        if not path:
            path = '_template'
            generate_name = True
        else:
            path = path.replace('\\', '/')
            path = path.replace('/', '_')
            mod_name = make_module_name(path)
            generate_name = mod_name in sys.modules

        if generate_name:
            mod_name = make_module_name(path, self.template_counter)
            while mod_name in sys.modules:
                self.template_counter += 1
                mod_name = make_module_name(path, self.template_counter)

        mod = ModuleType(mod_name)
        filename = '%s.py' % mod_name.replace('pybars.', '').replace('.', '/')
        exec(compile(container.full_code, filename, 'exec', dont_inherit=True),
             mod.__dict__)
        sys.modules[mod_name] = mod
        linecache.getlines(filename, mod.__dict__)

        return mod.__dict__[container.name]

    def template(self, code):
        def _render(context, helpers=None, partials=None, root=None):
            ns = {
                'context': context,
                'helpers': helpers,
                'partials': partials,
                'root': root
            }
            exec(
                code +
                '\nresult = render(context, helpers=helpers, partials=partials, root=root)',
                ns)
            return ns['result']

        return _render
Example #50
File: html.py Project: set-soft/pymeta3
tag ::= ('<' <spaces> <name>:n <spaces> <attribute>*:attrs '>'
         <html>:c
         '<' '/' <token n> <spaces> '>'
             => [n.lower(), dict(attrs), c])

html ::= (<text> | <tag>)*

text ::= (~('<') <anything>)+:t => ''.join(t)

attribute ::= <spaces> <name>:k <token '='> <quotedString>:v => (k, v)

quotedString ::= (('"' | '\''):q (~<exactly q> <anything>)*:xs <exactly q>
                     => ''.join(xs))

"""
TinyHTML = OMeta.makeGrammar(tinyHTMLGrammar, globals(), name="TinyHTML")
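
# A hedged usage sketch; the input string is an assumption, and the grammar's
# <name> rule (defined earlier in the original html.py) is relied on even
# though it is not shown in this excerpt.
tree, _error = TinyHTML("<p class='x'>hi</p>").apply("html")
# tree is roughly [['p', {'class': 'x'}, ['hi']]]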


def formatAttrs(attrs):
    """
    Format a dictionary as HTML-ish attributes.
    """
    return ''.join([" %s='%s'" % (k, v) for (k, v) in attrs.items()])
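
# For example (illustrative): formatAttrs({'class': 'x'}) returns " class='x'".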


unparserGrammar = """
contents ::= [<tag>*:t] => ''.join(t)
tag ::= ([:name :attrs <contents>:t]
            => "<%s%s>%s</%s>" % (name, formatAttrs(attrs), t, name)
         | <anything>)
"""
Example #51
0
class Py2GPUGrammar(OMeta.makeGrammar(py2gpu_grammar, vars, name="Py2CGrammar")):
    def raise_parse_error(self, node, error, message=''):
        lineno = getattr(node, 'lineno', 1)
        col_offset = getattr(node, 'col_offset', 1)
        if message:
            message = ': ' + message
        raise ParseError(lineno, error,
            'Parse error at line %d, col %d (node %s)%s' % (
                lineno, col_offset, node.__class__.__name__, message))

    @property
    def func_name(self):
        return getattr(self, '_func_name', None)

    def parse(self, data, rule='grammar', *args):
        # print data, rule, [(getattr(item, 'lineno', 0), getattr(item, 'col_offset', 0)) for item in (data if isinstance(data, (tuple, list)) else [data])]
        if not isinstance(data, (tuple, list)):
            data = (data,)
        try:
            grammar = self.__class__(data)
            grammar._func_name = self.func_name
            result, error = grammar.apply(rule, *args)
        except ParseError:
            self.raise_parse_error(data, None, 'Unsupported node type')
        try:
            head = grammar.input.head()
        except EOFError:
            pass
        else:
            self.raise_parse_error(head[0], error)
        return result

    def gen_subscript(self, name, indices, assigning):
        dims = len(indices)
        if '->' in name:
            assert dims == 1, 'Attribute values can only be one-dimensional.'
            name, attr = name.rsplit('->', 1)
            if attr == 'offset':
                info = _gpu_funcs[self.func_name]
                blockshapes = info['blockshapes']
                overlapping = info['overlapping']
                threadmemory = info['threadmemory']
                center_on_origin = info['center_on_origin']
                if name in threadmemory:
                    return '0'
                shape = blockshapes.get(name)
                if shape:
                    try:
                        dim = int(indices[0])
                    except (TypeError, ValueError):
                        raise ValueError('Offset must be an integer')
                    return self.get_block_init(name, dim, shape[dim],
                        name in overlapping, center_on_origin)[2]
                else:
                    raise ValueError('%s is not an array' % name)
            elif attr == 'shape':
                shape = '__%s_shape2' % name
                for dim in reversed(range(2)):
                    shape = '(%s == %d ? __%s_shape%d : %s)' % (indices[0], dim, name, dim, shape)
                return shape
            else:
                return '%s->%s[%s]' % (name, attr, indices[0])
        access = []
        shifted_indices = []
        for dim, index in enumerate(indices):
            shifted_indices.append(index)
            access.append(' * '.join(['__%s_shape%d' % (name, subdim)
                                      for subdim in range(dim+1, dims)] + [index]))
        subscript = '%s[%s]' % (name, ' + '.join(access))
        return subscript
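
    # Worked example (illustrative) for gen_subscript: with name='a' and
    # indices=['i', 'j'] on a two-dimensional block, the generated C subscript
    # is the row-major flattening  a[__a_shape1 * i + j].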

    def gen_func(self, func, level):
        name = func.name
        info = _gpu_funcs[name]

        # Store function name, so it can be reused in other parser rules
        self._func_name = name
        info['funcnode'] = func

        types = info['types']
        threadmemory = info['threadmemory']
        args = set(arg.id for arg in func.args.args)
        vars = set(types.keys()).symmetric_difference(
            args).symmetric_difference(threadmemory.keys())
        vars = '\n'.join('%s %s;' % (types[var][3], var) for var in vars
                         if var != 'return')

        # Calculate register requirements before parsing the function body
        maxthreads = None
        # TODO: actually calculate the real number of unused registers
        unused_registers = 8192 - 600
        needed_registers = 0
        for var, shape in threadmemory.items():
            size = numpy.array(shape).prod()
            needed_registers += size
            vars += '\n%s %s[%d];' % (types[var][3], var, size)
        if needed_registers:
            threads = unused_registers // needed_registers
            # If there aren't enough registers we fall back to local memory
            # and use a relatively high number of threads to compensate for
            # the slower memory access
            if threads < 96:
                threads = 64
            x_threads = int(sqrt(threads))
            maxthreads = (threads // x_threads, x_threads, 1)
        info['maxthreads'] = maxthreads
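        # Worked example (illustrative): a thread-memory array of shape (4, 4)
        # needs 16 registers per thread, so threads = (8192 - 600) // 16 = 474,
        # x_threads = int(sqrt(474)) = 21 and maxthreads = (474 // 21, 21, 1)
        # = (22, 21, 1).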

        if vars:
            vars += '\n\n'
        data = {
            'func': make_prototype(func, '__device__', name, info),
            'body': indent_source(level+1, vars) + self.parse(func.body, 'body', level+1),
        }
        source = _func_template % data

        # Functions with return values can't be called from the CPU
        if types.get('return'):
            self._func_name = None
            return source

        # Generate kernel that shifts block by offset and calls device function
        blockinit = []
        blockshapes = info['blockshapes']
        overlapping = info['overlapping']
        center_on_origin = info['center_on_origin']

        args = []
        for arg in func.args.args:
            origarg = arg = arg.id
            kind = types[arg][1]

            if kind.endswith('Array') and arg in blockshapes:
                arg = '__array_' + arg

            shape = blockshapes.get(origarg)
            if shape:
                offsetinit = []
                blockinit.append('%s *%s;' % (types[origarg][3], arg))
                blockinit.append('if (%s != NULL) {' % origarg)
                for dim, dimlength in enumerate(shape):
                    block, limit, shift = self.get_block_init(origarg, dim,
                        dimlength, origarg in overlapping, center_on_origin)
                    blockinit.append('    if (%s >= %s)\n        return;' % (block, limit))
                    if dim == len(shape) - 1:
                        offsetinit.append(shift)
                    else:
                        offsetinit.append('%s * %s' % (' * '.join('__%s_shape%d' % (origarg, subdim) for subdim in range(dim+1, len(shape))), shift))
                blockinit.append('    %s = %s + %s;' % (
                    arg, origarg, ' + '.join(offsetinit)))
                blockinit.append('} else {')
                blockinit.append('    %s = %s;' % (arg, origarg))
                blockinit.append('}')

            args.append(arg)

            if kind.endswith('Array'):
                args.extend('__%s_shape%d' % (origarg, dim) for dim in range(3))

        bodydata = {
            'declarations': '%s' % '\n'.join(blockinit),
            'call': '%s(%s);' % (func.name, ', '.join(args)),
        }
        data['func'] = make_prototype(func, '__global__', '__kernel_' + name, info)
        data['body'] = indent_source(level+1, _kernel_body % bodydata)
        source += _func_template % data

        # Reset function context
        self._func_name = None

        return source

    def get_block_init(self, name, dim, dimlength, overlapping, center_on_origin):
        block = 'BLOCK(%d)' % dim
        if overlapping:
            if center_on_origin:
                limit = '__%s_shape%d' % (name, dim)
                shift = '(%s - %s/2)' % (block, dimlength)
            else:
                limit = '__%s_shape%d - (%s - 1)' % (name, dim, dimlength)
                shift = block
        else:
            limit = '__%s_shape%d/%s' % (name, dim, dimlength)
            shift = '%s * %s' % (block, dimlength)
        return block, limit, shift
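
    # Illustrative outputs (not from the original source):
    #   get_block_init('a', 0, 16, False, False)
    #     -> ('BLOCK(0)', '__a_shape0/16', 'BLOCK(0) * 16')
    #   get_block_init('a', 0, 16, True, False)
    #     -> ('BLOCK(0)', '__a_shape0 - (16 - 1)', 'BLOCK(0)')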

    def gen_call(self, call):
        assert call.starargs is None
        assert call.kwargs is None
        info = _gpu_funcs[self.func_name]
        types = info['types']
        name = self.parse(call.func, 'varaccess')
        args = []
        for arg in [self.parse(arg, 'op') for arg in call.args]:
            args.append(arg)
            typeinfo = types.get(arg)
            if typeinfo and typeinfo[1].endswith('Array'):
                args.extend(self._get_dim_args(arg))
            elif arg == 'NULL':
                args.extend(3 * ('0',))
        if '->' in name:
            arg, name = name.split('->', 1)
            if info['types'][arg][2].name == 'float32':
                name = 'f' + name
            shape = info['blockshapes'].get(arg) or info['threadmemory'].get(arg)
            name += '%dd' % len(shape)
            dimargs = ', '.join(self._get_dim_args(arg))
            args.insert(0, '%s, %s' % (arg, dimargs))
            args.extend(str(arg) for arg in shape)
            name = '__array_' + name
        elif name in ('int', 'float', 'min', 'max', 'sqrt', 'log', 'abs'):
            # These functions are specialized via templates
            name = '__py_' + name
        return '%s(%s)' % (name, ', '.join(args))

    def _get_dim_args(self, arg):
        info = _gpu_funcs[self.func_name]
        threadmemory = info['threadmemory']
        shape = threadmemory.get(arg)
        if shape:
            shape += (3 - len(shape)) * (1,)
            return ('%d' % dim for dim in shape)
        return ('__%s_shape%d' % (arg, dim) for dim in range(3))

    def gen_for(self, node, level):
        assert not node.orelse, 'else clause not supported in for loops'
        rangespec = tuple(self.parse(node.iter, 'range'))
        length = len(rangespec)
        assert length in range(1, 4), 'range() must get 1-3 parameters'
        if length == 3:
            start, stop, step = rangespec
        elif length == 2:
            start, stop = rangespec
            step = 1
        else:
            start, step = 0, 1
            stop = rangespec[0]
        return _for_loop % {
            'name': self.parse(node.target, 'name'),
            'body': self.parse(node.body, 'body', level+1),
            'indent': indent(level),
            'start': start, 'stop': stop, 'step': step,
        }
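
    # For example (illustrative): `for i in range(2, n, 3): ...` yields
    # rangespec ('2', 'n', '3'), emitted via _for_loop with start='2',
    # stop='n', step='3'.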

    def gen_while(self, node, level):
        assert not node.orelse, 'else clause not supported in while loops'
        return _while_loop % {
            'test': self.parse(node.test, 'op'),
            'body': self.parse(node.body, 'body', level+1),
            'indent': indent(level),
        }
Example #52
0
 def __init__(self, string, globals=None):
     OMeta.__init__(self, string, globals)
     self.assertion_patterns = []