예제 #1
0
 def setUp(self):
     """Create fixtures: the python and coverage langlets plus a
     CSTFunction wrapper for each of them."""
     import langscape
     self.python = langscape.load_langlet("python")
     self.cover = langscape.load_langlet("coverage")
     from langscape.base.cstfunction import CSTFunction
     self.fn_py = CSTFunction(self.python)
     self.fn_cov = CSTFunction(self.cover)
예제 #2
0
def test_subnfa():
    """Smoke test: compute and pretty-print the sub-NFA rooted at a state
    picked from the middle of the first parse NFA's transition table."""
    python = langscape.load_langlet("python")
    for nfa in python.parse_nfa.nfas.values():
        trans = list(nfa[2])
        # Floor division: len(trans)/2 yields a float under Python 3, which
        # is not a valid list index. '//' behaves identically on Python 2.
        state = trans[len(trans) // 2]
        pprint_nfa(compute_subnfa(nfa, state), python)
        break  # one NFA is enough for a smoke test
예제 #3
0
def test_subnfa():
    """Smoke test: compute and pretty-print the sub-NFA rooted at a state
    picked from the middle of the first parse NFA's transition table."""
    python = langscape.load_langlet("python")
    for nfa in python.parse_nfa.nfas.values():
        trans = list(nfa[2])
        # Floor division: len(trans) / 2 yields a float under Python 3, which
        # is not a valid list index. '//' behaves identically on Python 2.
        state = trans[len(trans) // 2]
        pprint_nfa(compute_subnfa(nfa, state), python)
        break  # one NFA is enough for a smoke test
예제 #4
0
def run_tests(name = "", exclude = ()):
    '''
    This module provides limited test discovery. Unittest is not sufficient because it would reject
    non-Python modules. The strategy comprises walking through the langscape subdirectories, searching
    for directories named 'tests'. In those directories test_<name>.<suffix> files will be identified
    and executed as scripts.

    :param name: if non-empty, only test directories of the langlet with this name are run.
    :param exclude: iterable of langlet names to skip.
    '''
    remove_pyc()
    testpaths = []
    # Collect every 'tests' directory below the langscape package; remember
    # the owning langlet name when the directory lives under 'langlets'.
    for P in path(langscape.__file__).dirname().walkdirs():
        S = P.splitall()
        if ".hg" in S:
            continue  # skip Mercurial metadata
        elif S[-1] == "tests":
            if "langlets" in S:
                testpaths.append((P, S[-2]))
            else:
                testpaths.append((P,""))
    langlet = langscape.load_langlet("python")
    log = open("log.txt", "w")
    for pth, nm in testpaths:
        if (name and name != nm) or nm in exclude:
            continue
        for f in pth.files():
            # Lazily switch to the langlet that owns this tests directory.
            if nm and langlet.config.langlet_name!=nm:
                print "-"*70
                print "Load Langlet: ", nm
                langlet = langscape.load_langlet(nm)
            if f.basename().startswith("test_") and f.basename().endswith(langlet.config.source_ext):
                try:
                    print >> log, f
                    langlet.run_module(f)
                except Exception, e:
                    print "Failed to run", langlet
                    print " "*15, f
                    e = traceback.format_exc()
                    print e
                    return  # abort the whole run on first failure
예제 #5
0
def test2():
    """Fetch a C source file over HTTP and print every 'funcall' match
    produced by CSearchObject (live smoke test against network data)."""
    from langscape import load_langlet
    cfuncall = load_langlet("cfuncall")
    import urllib2
    f = urllib2.urlopen("http://codespeak.net/svn/xpython/trunk/dist/src/Objects/classobject.c")
    source = f.read()
    from langscape.sourcetools.search import CSearchObject
    so = CSearchObject(cfuncall, cfuncall.symbol.funcall)
    from langscape.csttools.cstsearch import find_node, find_all
    for i, m in enumerate(so.finditer(source)):
        print i, m.matched


    '''
예제 #6
0
def test2():
    langlet = langscape.load_langlet("nfa2gra_test")
    nfa2gra = NFA2GrammarTranslator(langlet)
    n = 0
    for i in range(1, 68):
        try:
            print "-" * 70
            print str(i)
            nfa = langlet.parse_nfa.nfas[getattr(langlet.symbol, "stmt" + str(i))]
            R, res = nfa2gra.check_translation(nfa, False)
            assert res  # , nfa2gra.check_translation(nfa)
        except Exception:
            print "FAIL", i, R
            n += 1
        print "-" * 70
    print "FAILURES", n

    nfa = langlet.parse_nfa.nfas[langlet.symbol.stmt9]
    print nfa2gra.check_translation(nfa, False)
예제 #7
0
def test1():
    """Search a Python source file for return statements and wrap each
    returned expression in a call to ``measure(...)``."""
    import langscape
    python = langscape.load_langlet("python")
    so = CSearchObject(python, python.parse_symbol.return_stmt)
    so.condition = lambda x: True  # accept every match unconditionally
    text = open(r"codesearch.py").read()
    '''
    textit = so.finditer(text)
    for item in textit:
        print item.matched
    # print so.begin, so.end
    # print text[so.begin: so.end]
    '''
    def subst_return(langlet, node):
        # Rewrite ``return <test>`` into ``return measure(<test>)``.
        test = find_node(node, langlet.symbol.test)
        if test:
            test[:] = langlet.fn.test(langlet.fn.CallFunc("measure", [test[:]]))
        return node

    cso = CSearchObject(python, python.parse_symbol.return_stmt)
    res = cso.subst(text, subst_return)
    print res
예제 #8
0
def test2():
    """Regenerate all NFAs from the grammar and compare each regenerated
    transition table against the one stored in ``langlet.parse_nfa``."""
    import langscape
    langlet = langscape.load_langlet("python")

    from langscape.base.grammar_gen import find_langlet_grammar
    grammar_type = "GrammarGen.g"
    # NOTE(review): ``source`` is never used below — possibly leftover from
    # an earlier version of this test; confirm before removing.
    source = find_langlet_grammar(langlet, grammar_type)

    nfagen = NFAGenerator(langlet)
    rules  = nfagen.create_all()
    for r, nfa in langlet.parse_nfa.nfas.items():
        table = rules[r][2]
        for state, follow in nfa[2].items():
            # Follow sets are compared order-insensitively.
            F = table.get(state,[])
            if sorted(follow)!=sorted(F):
                print "NFAs are different"
                print rules[r][0]
                pprint.pprint(table)
                print "-----------------"
                pprint.pprint(nfa[2])
                print
                break
예제 #9
0
def test3():
    import langscape
    langlet = langscape.load_langlet("python")
    nfagen = NFAGenerator(langlet)
    rule = nfagen.from_ebnf("if_stmt: 'if' test [ as_name] ':' suite ('elif' test [ as_name] ':' suite)* ['else' ':' suite]")
    print rule[0]
예제 #10
0
            if c0 in "=!:,;'\"":
                return " " + text
            else:
                return text

    def format_quote(self, c0, c1, text):
        """Decide whether a quote token needs a separating blank in front.

        Returns ``text`` unchanged when the preceding character already acts
        as a separator (space, dot, opening bracket), otherwise prefixes one
        blank. Returns None when ``c1`` is not a quote character.
        """
        if not self.isquote(c1):
            return None
        return text if c0 in " .[({" else " " + text

    def format_cmp(self, c0, c1, text):
        """Prepend a blank before a comparison token unless the preceding
        character is a dot or an opening bracket; None when ``c1`` is not a
        comparison character."""
        if not self.iscmp(c1):
            return None
        return text if c0 in ".[({" else " " + text


if __name__ == '__main__':
    # Round-trip check: parse this unparser's own source with the
    # 'coverage' langlet and print the regenerated text.
    source = open("cstunparser.py").read()
    import pprint
    import langscape
    import cProfile as profile
    coverage = langscape.load_langlet("coverage")
    coverage.options["refactor_mode"] = False
    cst = coverage.parse(source)
    S = coverage.unparse(cst)
    print S
예제 #11
0
__all__ = ["refine", "LangletExpr", "RuleTemplate"]

# NOTE: the duplicated ``import langscape`` line was removed.
import os, pprint
import langscape
from langscape.ls_const import *
from langscape.csttools.cstsearch import *
from langscape.trail.nfaparser import NFAParser, TokenStream
from langscape.trail.nfatools import compute_span_traces
from langscape.langlets.ls_grammar.grammar_object import GrammarObject
from langscape.sourcetools.sourcegen import SourceGenerator

# Langlets used by the refinement machinery; loaded once at import time.
rule_template = langscape.load_langlet("rule_template")
ls_grammar    = langscape.load_langlet("ls_grammar")


class TraceObject(TokenStream):
    # Token stream variant that signals exhaustion via StopIteration
    # instead of letting the IndexError escape.
    def get_current(self):
        """Return the token at the current stream position.

        Raises StopIteration when the position is past the end.
        """
        try:
            return self.tokstream[self.position]
        except IndexError:
            raise StopIteration


class TraceChecker(NFAParser):
    # Parser specialized for checking traces against a langlet's parse tables.
    def __init__(self, langlet):
        """Cache the langlet's parse-NFA tables for direct access.

        NOTE(review): ``NFAParser.__init__`` is not invoked here — confirm
        the base class tolerates that.
        """
        self.langlet    = langlet
        self.rules      = langlet.parse_nfa.nfas
        self.reach      = langlet.parse_nfa.reachables
        self.keywords   = langlet.parse_nfa.keywords
        self.expanded   = langlet.parse_nfa.expanded
예제 #12
0
                s = " "*indent+nid_text
        for item in node[1:]:
            if isinstance(item, list):
                if depth > 0:
                    s+=self.node_to_text(item, indent = indent+self.INDENT, depth = depth-1, mark = mark)
                elif depth == 0:
                    # use flat = 2 to indicate that no further nesting shall be applied
                    s+=self.node_to_text(item, indent = indent+self.INDENT, depth = -1, mark = mark)
                else:
                    pass
            elif isinstance(item, str):
                s+=" "*(indent+self.INDENT)+self.wrap_whitespace(item)+"\n"
        if indent == 0:
            return "\n"+s+"\n"
        else:
            return s

    def pformat(self, node, depth = 10000, mark = ()):
        """Render *node* as an indented text block starting at column 0."""
        formatted = self.node_to_text(node, indent = 0, depth = depth, mark = mark)
        return formatted

    def pprint(self, node, depth = 10000, mark = (), line = -1):
        """Print the formatted node. ``line`` is currently unused."""
        print self.pformat(node, depth = depth, mark = mark)

if __name__ == '__main__':
    # Demo: parse a trivial expression and pretty-print its CST.
    import langscape
    langlet = langscape.load_langlet("python")
    cst = langlet.parse("1+1")
    import pprint
    langlet.pprint(cst, depth=100)
예제 #13
0
def test_span():
    """Pretty-print the span traces of parse NFA #1016 of the python langlet."""
    langlet = langscape.load_langlet("python")
    target = langlet.parse_nfa.nfas[1016]
    pprint.pprint(compute_span_traces(target))
예제 #14
0
                    return tree
                else:
                    last_set = self.langlet.parse_nfa.last_set[nid]
                    S = [s for s in expected if s in last_set]
                    if S:
                        (s, arg) = self.completion(tree, [FAIL], S, {})
                    else:
                        (s, arg) = self.completion(tree, [FAIL], expected, {})




if __name__ == '__main__':
    import langscape
    import pprint
    python = langscape.load_langlet("python")
    symbol = python.parse_symbol
    builder = CSTBuilder(python)
    from cstsearch import*

    #python.pprint( builder.build_cst(symbol.simple_stmt, a1[0], ';', a1[0]) )

    p4d  = langscape.load_langlet("p4d")
    st = p4d.parse("1\n")
    '''
    a1 = find_node(st, p4d.parse_symbol.atom)

    for simple_stmt in find_all(p4d.transform(p4d.parse("elm x:\n y\n")), python.parse_symbol.simple_stmt):
        print python.unparse(simple_stmt)
        python.check_node(simple_stmt)
        for n in simple_stmt[1:]:
예제 #15
0
파일: gpgen.py 프로젝트: leomauro/langscape
from langscape.csttools.cstutil import is_keyword

def get_token_string(langlet, nid):
    """Return a printable name for terminal id ``nid``.

    Keywords resolve through ``langlet.keywords``; lexer constants through
    ``lex_nfa.constants``; everything else falls back to the node name.
    """
    if is_keyword(nid):
        return langlet.keywords[nid]
    # NOTE(review): membership is tested with ``nid + SYMBOL_OFFSET`` while the
    # lookup uses the raw ``nid`` — verify this asymmetry is intentional.
    if nid+SYMBOL_OFFSET in langlet.lex_nfa.constants:
        return langlet.lex_nfa.constants[nid]
    else:
        return langlet.get_node_name(nid)

if __name__ == '__main__':
    # Generate token traces for the grammar langlet's 'rhs' rule and dump
    # them, one readable trace per line, into grammar_traces.py.
    import langscape
    import pprint
    import cProfile

    langlet = langscape.load_langlet("ls_grammar")
    tracegen = TraceGen(langlet)
    traces = tracegen.run(start = langlet.symbol.rhs, maxlen = 4, exclude = [langlet.token.STRING])
    f= open("grammar_traces.py", "w")
    for trace in traces:
        print >>f, [get_token_string(langlet, s) for s in trace]



    #cProfile.run("tracegen.run(length = 2)")
    #for item in tracegen.run(length = 3):
    #    print item



예제 #16
0
        while True:
            tt, trace = tracers.pop()
            selectables = tt.selectables()
            print selectables, len(tracers)
            if FIN in selectables:
                return trace
            else:
                for s in selectables:
                    cloned = tt.clone()
                    cloned.select(s)
                    tracers.insert(0, (cloned, trace+[s]))


if __name__ == '__main__':
    import langscape
    ls_grammar = langscape.load_langlet("ls_grammar")
    stream = ls_grammar.tokenize("a:( b )\n")
    tracer = TokenTracer(ls_grammar)
    print tracer.check(stream)
    print [S for S in tracer.state if S[0] is FIN]

    python = langscape.load_langlet("python")
    stream = python.tokenize("def foo():\n print 42\ndef bar():\n print 47\n")

    tracer = TokenTracer(python, start = python.parse_symbol.funcdef)

    for tok in stream:
        try:
            selection = tracer.select(tok[0])
            print tok
        except NonSelectableError:
예제 #17
0
 def setUp(self):
     """Load the coverage langlet and wrap it in a LangletCSTFunction."""
     import langscape
     from langscape.langlets.python.cstfunction import LangletCSTFunction
     self.langlet = langscape.load_langlet("coverage")
     self.fn = LangletCSTFunction(self.langlet)
예제 #18
0
 def __init__(self, langlet):
     """Keep the target langlet and load the python langlet for reference."""
     self.langlet = langlet
     self.python  = langscape.load_langlet("python")
예제 #19
0
# Module used to translate a BNF grammar into an EBNF grammar using
# left recursion elimination

__all__ = ["convertbnf", "eliminate_left_recursion"]

import pprint
import langscape
from langscape.csttools.cstsearch import find_node, find_all
from langscape.csttools.cstutil import*
from langscape.trail.nfagen import NFAGenerator
from langscape.trail.nfa2grammar import NFA2GrammarTranslator
from langscape.base.grammar_gen import SymbolObject
from langscape.util import flip

bnfreader  = langscape.load_langlet("bnfreader")
ls_grammar = langscape.load_langlet("ls_grammar")

def transform_recursion(nfa, state = None):
    '''
    :param nfa: NFA containing ``state``.
    :param state: state of kind (S,..., T) where S is the nid of the nfa.

    Description ::
        Let L be the state passed to this function.

        1) We map L using ShiftNested: (S, idx, 0, T) -> (S, idx, TRAIL_OPEN, T)
        2) We replace L by ShiftNested(L) in the NFA.
           If A -> [L, X, Y, ...] is a transition we collect X, Y, ... within a separate
           list called Cont.
        3) Replace (FIN, FEX, 0, S) by (S, idx, TRAIL_CLOSE, S) on each occurence.
        4) Add the following transition:
예제 #20
0
 def setUp(self):
     """Load the p4d langlet and expose its CST builder and symbol tables."""
     import langscape
     self.p4d = langscape.load_langlet("p4d")
     self.cstbuilder = CSTBuilder(self.p4d)
     self.symbol = self.p4d.parse_symbol
     self.token = self.p4d.parse_token
예제 #21
0
            if c0 in "=!:,;'\"":
                return " "+text
            else:
                return text

    def format_quote(self, c0, c1, text):
        """Insert a separating blank before a quote unless the preceding
        character already separates; None when ``c1`` is not a quote."""
        if not self.isquote(c1):
            return None
        if c0 in " .[({":
            return text
        return " "+text

    def format_cmp(self, c0, c1, text):
        """Insert a blank before a comparison token unless it follows a dot
        or an opening bracket; None when ``c1`` is not a comparison."""
        if not self.iscmp(c1):
            return None
        return text if c0 in ".[({" else " "+text


if __name__ == '__main__':
    # Round-trip check: parse this unparser's own source with the
    # 'coverage' langlet and print the regenerated text.
    source = open("cstunparser.py").read()
    import pprint
    import langscape
    import cProfile as profile
    coverage = langscape.load_langlet("coverage")
    coverage.options["refactor_mode"] = False
    cst = coverage.parse(source)
    S = coverage.unparse(cst)
    print S
예제 #22
0
 def setUp(self):
     """Load the python langlet and expose its CST builder and symbol tables."""
     import langscape
     self.python = langscape.load_langlet("python")
     self.cstbuilder = CSTBuilder(self.python)
     self.symbol = self.python.parse_symbol
     self.token = self.python.parse_token
예제 #23
0
def test_branchpoints():
    python = langscape.load_langlet("python")
    nfa = python.lex_nfa.nfas[1003]
    pprint_nfa(nfa, python)
    print len(nfa[2])
    return compute_branch_points(nfa)
예제 #24
0
 def setUp(self):
     """Load the gallery langlet and expose its CST builder and symbol tables."""
     import langscape
     self.langlet = langscape.load_langlet("gallery")
     self.cstbuilder = CSTBuilder(self.langlet)
     self.symbol = self.langlet.parse_symbol
     self.token = self.langlet.parse_token
예제 #25
0
# Run the p4d test modules through the langlet importer so their
# non-Python sources are translated before execution.
import langscape
p4d = langscape.load_langlet("p4d")
p4d.importer.import_module("test_p4d")
p4d.importer.import_module("test_bytelet")



예제 #26
0
 def setUp(self):
     """Load the python langlet used by the tests."""
     import langscape
     self.python = langscape.load_langlet("python")
예제 #27
0
        ERR_TOKEN = self.langlet.get_node_name(error_token[0], self.typ)
        rule += " "+ERR_TOKEN+"\n\n"
        rule += " "*(n2-n1+5)+"^"*len(ERR_TOKEN)
        return rule

    def format_terminals(self, terminals):
        """Build a readable listing of the terminals expected at an error
        site, split into keyword and symbol sections."""
        kwds = []
        symbols = []
        for t in terminals:
            if is_keyword(t):
                kwds.append(self.langlet.get_node_name(t))
            else:
                token = self.langlet.parse_token
                symbols.append(token.symbol_map.get(t, token.sym_name.get(t, str(t))))
        lines = ["\nOne of the following symbols must be used:\n"]
        for header, names in (("    Keywords", kwds), ("    Symbols", symbols)):
            if not names:
                continue
            lines.append(header)
            lines.extend("             %s" % nm for nm in names)
        return "\n".join(lines)


if __name__ == '__main__':
    # Tokenizing source that ends in an illegal token exercises the error
    # formatting code above.
    import langscape as ls
    langlet = ls.load_langlet("python")
    langlet.tokenize("foo(0x89) :?")
예제 #28
0
        while True:
            tt, trace = tracers.pop()
            selectables = tt.selectables()
            print selectables, len(tracers)
            if FIN in selectables:
                return trace
            else:
                for s in selectables:
                    cloned = tt.clone()
                    cloned.select(s)
                    tracers.insert(0, (cloned, trace + [s]))


if __name__ == '__main__':
    import langscape
    ls_grammar = langscape.load_langlet("ls_grammar")
    stream = ls_grammar.tokenize("a:( b )\n")
    tracer = TokenTracer(ls_grammar)
    print tracer.check(stream)
    print[S for S in tracer.state if S[0] is FIN]

    python = langscape.load_langlet("python")
    stream = python.tokenize("def foo():\n print 42\ndef bar():\n print 47\n")

    tracer = TokenTracer(python, start=python.parse_symbol.funcdef)

    for tok in stream:
        try:
            selection = tracer.select(tok[0])
            print tok
        except NonSelectableError:
예제 #29
0
 def setUp(self):
     """Load the python langlet used by the tests."""
     import langscape
     self.python = langscape.load_langlet("python")
예제 #30
0
# Module used to translate a BNF grammar into an EBNF grammar using
# left recursion elimination

__all__ = ["convertbnf", "eliminate_left_recursion"]

import pprint
import langscape
from langscape.csttools.cstsearch import find_node, find_all
from langscape.csttools.cstutil import *
from langscape.trail.nfagen import NFAGenerator
from langscape.trail.nfa2grammar import NFA2GrammarTranslator
from langscape.base.grammar_gen import SymbolObject
from langscape.util import flip

bnfreader = langscape.load_langlet("bnfreader")
ls_grammar = langscape.load_langlet("ls_grammar")


def transform_recursion(nfa, state=None):
    '''
    :param nfa: NFA containing ``state``.
    :param state: state of kind (S,..., T) where S is the nid of the nfa.

    Description ::
        Let L be the state passed to this function.

        1) We map L using ShiftNested: (S, idx, 0, T) -> (S, idx, TRAIL_OPEN, T)
        2) We replace L by ShiftNested(L) in the NFA.
           If A -> [L, X, Y, ...] is a transition we collect X, Y, ... within a separate
           list called Cont.
        3) Replace (FIN, FEX, 0, S) by (S, idx, TRAIL_CLOSE, S) on each occurence.