Example #1
def parse(fileList, dirName):

    topRules = []

    for f in fileList:

        #join filename with current directory path
        fileName = os.path.join(dirName, f)

        #if f is a file, parse and extract rules
        if os.path.isfile(fileName):
            char_stream = antlr3.ANTLRFileStream(fileName)
            lexer = SpeakPythonJSGFLexer(char_stream)
            tokens = antlr3.CommonTokenStream(lexer)

            #			for t in lexer:
            #				print t;

            parser = SpeakPythonJSGFParser(tokens)
            parser.prog()

            #get the list of top-level rules
            tr = parser.rules

        otherFileParse = ([], "")

        #if f is a dir, pass list of files into recursive call
        if os.path.isdir(fileName):
            subFiles = os.listdir(fileName)
            otherFileParse = parse(subFiles, fileName)

        tr.extend(otherFileParse[0])

        ruleFileName = re.sub(r"[\.].*", "", fileName)
        ruleFileName = re.sub(r"/;:", "_", ruleFileName)

        #accumulate all alias rules together while using the file path as a prefix so as not to overlap aliases
        aliasText = ""
        for ar in parser.aliasRules:

            #fix the alias references in the expressions to match the prefixed rule names
            alteredExp = re.sub(r"<([^>]+)>", "<" + ruleFileName + r"_\1>",
                                parser.aliasRules[ar])

            #concatenate the finished alias rule
            print alteredExp
            aliasText += "<" + ruleFileName + "_" + ar + "> = " + alteredExp + ";\n"

        aliasText += "\n"

        #prefix the alias references in the top-level rules associated with the currently parsed file
        topRules = []
        for r in tr:
            alteredExp = re.sub(r"<([^>]+)>", "<" + ruleFileName + r"_\1>", r)
            topRules.append(alteredExp)

    return (topRules, aliasText + otherFileParse[1], ruleFileName)
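A minimal usage sketch for the function above (the grammar directory name and the output file are assumptions, not part of the example): parse() is handed the listing of a directory and returns the prefixed top-level rules plus the accumulated alias text, which can then be written out as the body of a JSGF grammar.

import os

grammarDir = "grammars"  # hypothetical directory containing the grammar files
topRules, aliasText, _ = parse(os.listdir(grammarDir), grammarDir)
with open("commands.jsgf", "w") as out:
    out.write(aliasText)             # all prefixed alias rule definitions
    out.write("\n".join(topRules))   # the prefixed top-level rule expressions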
Example #2
def main(argv):
    char_stream = antlr3.ANTLRFileStream(argv[1], encoding='utf-8')
    lexer = solLexer(char_stream)
    tokens = antlr3.CommonTokenStream(lexer)
    pp = solParser(tokens)
    prgm = pp.sourceUnit()
    tree = prgm.tree

    print()
Example #3
    def execute(self, argv):
        options, args = self.parseOptions(argv)

        self.setUp(options)

        if options.interactive:
            while True:
                try:
                    line = input(">>> ")
                except (EOFError, KeyboardInterrupt):
                    self.stdout.write("\nBye.\n")
                    break

                inStream = antlr3.ANTLRStringStream(line)
                self.parseStream(options, inStream)

        else:
            if options.input is not None:
                inStream = antlr3.ANTLRStringStream(options.input)

            elif len(args) == 1 and args[0] != '-':
                inStream = antlr3.ANTLRFileStream(args[0],
                                                  encoding=options.encoding)

            else:
                inStream = antlr3.ANTLRInputStream(self.stdin,
                                                   encoding=options.encoding)

            if options.profile:
                try:
                    import cProfile as profile
                except ImportError:
                    import profile

                profile.runctx('self.parseStream(options, inStream)',
                               globals(), locals(), 'profile.dat')

                import pstats
                stats = pstats.Stats('profile.dat')
                stats.strip_dirs()
                stats.sort_stats('time')
                stats.print_stats(100)

            elif options.hotshot:
                import hotshot

                profiler = hotshot.Profile('hotshot.dat')
                profiler.runctx('self.parseStream(options, inStream)',
                                globals(), locals())

            else:
                self.parseStream(options, inStream)
Example #4
 def Compile(self, path_to_input):
     char_stream = antlr3.ANTLRFileStream(path_to_input, "gbk")
     lexer = protobufLexer(char_stream)
     tokens = antlr3.CommonTokenStream(lexer)
     parser = protobufParser(tokens)
     p =  parser.prog()
     #print p.tree.toString()
     root = p.tree
     nodes = antlr3.tree.CommonTreeNodeStream(root)
     nodes.setTokenStream(tokens)
     walker = protobufWalker(nodes)
     walker.templateLib = self.templates
     return walker.prog().toString()
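A minimal sketch of driving the Compile method above, assuming a hypothetical ProtobufCompiler class that owns it and stores the StringTemplate group the tree walker renders through (stringtemplate3.StringTemplateGroup is used the same way as in Example #15 below; the .stg and .proto file names are made up):

import stringtemplate3

compiler = ProtobufCompiler()  # hypothetical owner of the Compile method above
compiler.templates = stringtemplate3.StringTemplateGroup(
    file=open("cpp.stg"), lexer='angle-bracket')  # templates consumed by protobufWalker
code = compiler.Compile("addressbook.proto")      # rendered template text for the .proto file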
Example #5
 def parse_file(cls, input, input_string=False):
     if not input_string:
         char_stream = antlr3.ANTLRFileStream(input)
     else:
         char_stream = antlr3.ANTLRStringStream(input)
     lexer = cls.Lexer(char_stream)
     tokens = antlr3.CommonTokenStream(lexer)
     parser = cls.Parser(tokens)
     result = parser.prog()
     if len(lexer.error_list) > 0:
         raise CongressException("Lex failure.\n" +
                                 "\n".join(lexer.error_list))
     if len(parser.error_list) > 0:
         raise CongressException("Parse failure.\n" + \
             "\n".join(parser.error_list))
     return result.tree
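A minimal sketch of calling this classmethod (PolicyCompiler is a hypothetical subclass; the real class only has to expose cls.Lexer and cls.Parser, the generated ANTLR3 classes, whose instances carry an error_list):

# parse a policy from a file on disk
tree = PolicyCompiler.parse_file("policy.dlog")

# parse the same kind of input from an in-memory string
tree = PolicyCompiler.parse_file("p(x) :- q(x)", input_string=True)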
Example #6
 def load_from_file(self, filename):
     """Load a fuzzy system from FCL file."""
     encoding = None
     f = None
     try:
         # read first line
         f = open(filename)
         line = f.readline()
         import re
         # check for coding
         result = re.search(r'coding[=:]\s*([-\w.]+)', line)
         if result:
             # found one and use it
             encoding = result.group(1)
     except:
         # ok, then try without encoding
         pass
     if f:
         f.close()
     return self.__load(antlr3.ANTLRFileStream(filename, encoding))
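The re.search above looks for a PEP 263 style coding declaration on the first line of the FCL file; a standalone illustration using only the standard library (the sample line is made up):

import re

line = "# -*- coding: utf-8 -*-"
match = re.search(r'coding[=:]\s*([-\w.]+)', line)
encoding = match.group(1) if match else None  # -> "utf-8"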
Example #7
    def load(self, filename):
        """Loads the given dgdl file
            :param filename the name of the DGDL file to load
            :type filename str
        """

        filename = self.get_file(filename)

        if os.path.isfile(filename):
            try:
                char_stream = antlr3.ANTLRFileStream(filename)
                lexer = dgdlLexer(char_stream)
                tokens = antlr3.CommonTokenStream(lexer)
                parser = dgdlParser(tokens)
                return parser.system().tree
            except Exception as e:
                traceback.print_exc()
                return None
        else:
            return None
Example #8
def make_jasmin_file(src_filename, dest_filename, tokens_filename=''):
    # Run lexer
    char_stream = antlr3.ANTLRFileStream(src_filename, encoding='utf8')
    lexer = ListLangLexer.ListLangLexer(char_stream)
    tokens = antlr3.CommonTokenStream(lexer)

    if tokens_filename:
        tokens_out(tokens.getTokens(), tokens_filename)

    # Get AST tree
    parser = ListLangParser.ListLangParser(tokens)
    ast = parser.program().tree

    #print ast.toStringTree()

    errors = error_processor.get_all_errors()
    if errors:
        sys.stderr.write('\n'.join(errors))
        sys.exit(1)

    nodes = antlr3.tree.CommonTreeNodeStream(ast)
    nodes.setTokenStream(tokens)

    walker = ListLangWalker.ListLangWalker(nodes)

    try:
        target_code = walker.program()
    except error_processor.SemanticException as e:
        error_processor.add_error(error_processor.SEMANTIC, e.line,
                                  e.pos_in_line, e.message)

    errors = error_processor.get_all_errors()
    if errors:
        sys.stderr.write('\n'.join(errors))
        sys.exit(1)

    if target_code:
        target_file = open(dest_filename, 'w')
        target_file.write(target_code)
        target_file.close()
Example #9
def parse(conn, fileList, dirName):

    parser = None
    otherGlobalTests = {}

    for f in fileList:

        #join filename with current directory path
        fileName = os.path.join(dirName, f)

        #if f is a file, parse and insert into db
        if os.path.isfile(fileName):

            char_stream = antlr3.ANTLRFileStream(fileName)
            lexer = SpeakPythonLexer(char_stream)
            tokens = antlr3.CommonTokenStream(lexer)

            #			for t in lexer:
            #				print t;

            parser = SpeakPythonParser(tokens)
            parser.prog()

            insertIntoDB(conn, parser.matches, parser.aliases)

        #if f is a dir, pass list of files into recursive call
        if os.path.isdir(fileName):
            subFiles = os.listdir(fileName)
            otherGlobalTests = parse(conn, subFiles, fileName)

    globalTests = {}

    if parser is None:
        print "Parser not defined."
    else:
        globalTests = parser.globalTests

    globalTests.update(otherGlobalTests)

    return globalTests
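A minimal sketch of driving the parse function above (the sqlite3 database and the directory name are assumptions; insertIntoDB is expected to come from the same module as the example):

import os
import sqlite3

conn = sqlite3.connect("speakpython.db")  # hypothetical database that insertIntoDB writes to
grammarDir = "commands"                   # hypothetical directory of SpeakPython sources
globalTests = parse(conn, os.listdir(grammarDir), grammarDir)
conn.commit()
conn.close()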
Example #10
 def parse_file(self, path):
     """Parse from a file specififed by path."""
     return self._parse(antlr3.ANTLRFileStream(path))
Example #11
 def load_from_file(self,filename):
     return self.__load(antlr3.ANTLRFileStream(filename))
Example #12
def convert(path,verbose=0):
    char_stream = antlr3.ANTLRFileStream(path,encoding='utf-8')
    return convert_charstream(char_stream,verbose)
Example #13
from jshadobf.common.tree_printer import *
import jshadobf.parser

if __name__ == "__main__":
    sys.setrecursionlimit(1000000)

    #input = sys.stdin.read()
    if len(sys.argv) > 1:
        prg = sys.argv[1]
    else:
        print "no input prgm"
        sys.exit()

#~ input = open(prg).read()
#~ print help(antlr3.ANTLRStringStream)
    char_stream = antlr3.ANTLRFileStream(prg, encoding='utf-8')
    ## or to parse a file:
    ## char_stream = antlr3.ANTLRFileStream(path_to_input)
    ## or to parse an opened file or any other file-like object:
    ## char_stream = antlr3.ANTLRInputStream(file)

    lexer = JavaScriptLexer(char_stream)

    lexernames = {}
    #     for n in dir(JavaScriptParser):
    #         lexernames[getattr(JavaScriptParser,n)] = n

    while True:
        nt = lexer.nextToken()
        print lexernames
        print "<%s,%s> " % (str(nt.type), str(nt.text))
Example #14

xkbfilename = "gr"
if len(sys.argv) > 1:
    xkbfilename = sys.argv[1]

try:
    xkbfile = open(xkbfilename, 'r')
except OSError:
    print "Could not open file ", xkbfilename, ". Aborting..."
    sys.exit(-1)

xkbfile.close()

# char_stream = antlr3.ANTLRFileStream(xkbfilename, encoding='utf-8')
char_stream = antlr3.ANTLRFileStream(xkbfilename)
lexer = XKBGrammarLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = XKBGrammarParser(tokens)

result = parser.layout()

print "XXXXXXXXXXXXXXXXXXXXXXX", xkbfilename
print "tree =", result.tree.toStringTree()

nodes = antlr3.tree.CommonTreeNodeStream(result.tree)
nodes.setTokenStream(tokens)
walker = XKBGrammarWalker(nodes)
# walker.layout()

MAX = 10
Example #15
import sys
import antlr3
import stringtemplate3
from CMinusParser import CMinusParser
from CMinusLexer import CMinusLexer

if len(sys.argv) == 2:
    templateFileName = "Java.stg"
    inputFileName = sys.argv[1]

elif len(sys.argv) == 3:
    templateFileName = sys.argv[1]
    inputFileName = sys.argv[2]

else:
    sys.stderr.write(repr(sys.argv) + '\n')
    sys.exit(1)
    
templates = stringtemplate3.StringTemplateGroup(
    file=open(templateFileName, 'r'),
    lexer='angle-bracket'
    )

cStream = antlr3.ANTLRFileStream(inputFileName)
lexer = CMinusLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = CMinusParser(tStream)
parser.templateLib = templates
r = parser.program()
print r.st.toString()
Example #16
# Pop the first entry which is the path to the directory containing this
# script.
sys.path.pop(0)

import os.path

import antlr3

from c_llvm.parser.c_grammarLexer import c_grammarLexer
from c_llvm.parser.c_grammarParser import c_grammarParser
from c_llvm.ast.base import AstTreeAdaptor

# input = '...what you want to feed into the parser...'
# char_stream = antlr3.ANTLRStringStream(input)
# or to parse a file:
char_stream = antlr3.ANTLRFileStream(sys.argv[1])
# or to parse an opened file or any other file-like object:
# char_stream = antlr3.ANTLRInputStream(file)

lexer = c_grammarLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = c_grammarParser(tokens)
parser.setTreeAdaptor(AstTreeAdaptor())
r = parser.translation_unit()

root = r.tree
print "tree = " + root.toStringTree()
output_file = os.path.splitext(sys.argv[1])[0] + '.ll'
with open(output_file, 'w') as f:
    f.write(root.generate_code())
Example #17
# vim:fileencoding=gbk

import argparse
import sys
import antlr3
import antlr3.tree
from CMinusLexer import CMinusLexer
from CMinusParser import CMinusParser
from llvmCompiler import LLVMCompiler

parser = argparse.ArgumentParser()
parser.add_argument('filename', help='the name of script file')
parser.add_argument('-O', '--optimize', action='store_true', help='optimize the bytecode')
parser.add_argument('-v', '--verbose', action='store_true', help='output the bytecode to ll file')
args = parser.parse_args()

istream = antlr3.ANTLRFileStream(args.filename, 'utf-8')
lexer = CMinusLexer(istream)
parser = CMinusParser(antlr3.CommonTokenStream(lexer))
meta = parser.program()

compiler = LLVMCompiler(meta)
compiler.compile(args.optimize)
if args.verbose: 
    compiler.dump(args.filename + '.ll')
compiler.run()
Example #18
    def parse_layout_slave(self, xkbfilename, variantname = None):
        #print "+++++We are recursive, called with", xkbfilename, variantname
        char_stream = antlr3.ANTLRFileStream(xkbfilename, encoding='utf-8')
        lexer = XKBGrammarLexer(char_stream)
        tokens = antlr3.CommonTokenStream(lexer)
        parser = XKBGrammarParser(tokens)

        parser_layout = parser.layout()
        variants = []

        xml_layout = etree.Element('layout')
        xml_layout.attrib['layoutname'] = os.path.basename(xkbfilename)
        
        includes = []

        for symbols in parser_layout.tree.getChildren():
            eSymbol = etree.SubElement(xml_layout, 'symbols')
            for mapobject in symbols.getChildren():
                if mapobject.getType() == MAPTYPE:
                    for maptypesect in mapobject.getChildren():
                        if maptypesect.getType() == MAPOPTIONS:
                            for mapoption in maptypesect.getChildren():
                                if mapoption.getText() == 'xkb_symbols' or mapoption.getText() == 'hidden':
                                    eMapOption = etree.SubElement(eSymbol, 'mapoption')
                                    eMapOption.text = mapoption.getText()
                        elif maptypesect.getType() == MAPNAME:
                            if maptypesect.getChildCount() == 1:
                                eMapName = etree.SubElement(eSymbol, 'mapname')
                                eMapName.text = maptypesect.getChildren()[0].getText()[1:-1]
                                variants.append(maptypesect.getChildren()[0].getText()[1:-1])
                            else:
                                return { "success": False }
                        else:
                            return { "success": False }
                elif mapobject.getType() == MAPMATERIAL:
                    eMapMaterial = etree.SubElement(eSymbol, 'mapmaterial')
                    for name in self.getChildrenByType(mapobject, TOKEN_NAME):
                        nameText = name.getChild(0).getText()[1:-1]
                        eTokenName = etree.SubElement(eMapMaterial, 'tokenname', name=nameText ) 
                    for include in self.getChildrenByType(mapobject, TOKEN_INCLUDE):
                        eInclude = etree.SubElement(eMapMaterial, 'tokeninclude')
                        eInclude.text = include.getChild(0).getText()[1:-1]
                        includes.append(eInclude.text)
                    for keytype in self.getChildrenByType(mapobject, TOKEN_KEY_TYPE):
                        keytypeText = keytype.getChild(0).getText()
                        eKeyType = etree.SubElement(eMapMaterial, 'tokentype')
                        eKeyType.text = keytypeText[1:-1]
                    for modmap in self.getChildrenByType(mapobject, TOKEN_MODIFIER_MAP):
                        eModMap = etree.SubElement(eMapMaterial, 'tokenmodifiermap', state=modmap.getChild(0).getText())
                        for modstate in self.getChildrenByTypes(modmap, KEYCODE, KEYCODEX):
                            if modstate.getType() == KEYCODE:
                                  eModState = etree.SubElement(eModMap, "keycode", value=modstate.getChild(0).getText())
                            elif modstate.getType() == KEYCODEX:
                                eModState = etree.SubElement(eModMap, "keycodex", value=modstate.getChild(0).getText())
                            else:
                                return { "success": False }
                                # print "Unexpected token encountered. Aborting...", modstate.getText()
                                # sys.exit(-1)
                    allkeysymgroups = {}
                    for keyset in self.getChildrenByType(mapobject, TOKEN_KEY):
                        allkeysymgroups[keyset.getChild(0).getChild(0).getText()] = keyset
                    sortedkeysymgroups = self.sortDict(allkeysymgroups, KeycodesReader.compare_keycode)
                    for keyset in sortedkeysymgroups:
                        elem_keysymgroup = self.getChildrenByType(keyset, ELEM_KEYSYMGROUP)
                        elem_virtualmods = self.getChildrenByType(keyset, ELEM_VIRTUALMODS)
                        elem_overlay = self.getChildrenByType(keyset, OVERLAY)
                        override = self.getChildrenListByType(keyset, OVERRIDE)
                        eTokenKey = etree.SubElement(eMapMaterial, 'tokenkey')
                        eKeyCodeName = etree.SubElement(eTokenKey, 'keycodename')
                        keycodex = self.getChildrenListByType(keyset, KEYCODEX)
                        if len(keycodex) == 1:
                            eKeyCodeName.text = keycodex[0].getChild(0).getText()
                        else:
                            return { "success": False }
                            #print "Could not retrieve keycode name"
                            #exit(-1)
                        if len(override) == 1:
                            eTokenKey.attrib['override'] = "True"
                        else:
                            eTokenKey.attrib['override'] = "False"
                        if len(self.getChildrenListByType(keyset, ELEM_KEYSYMGROUP)):
                            elem_keysyms = self.getChildrenListByType(keyset, ELEM_KEYSYMS)
                            eKeySymGroup = etree.SubElement(eTokenKey, 'keysymgroup')
                            keysymgroup_counter = len(self.getChildrenListByType(keyset, ELEM_KEYSYMGROUP))
                            for elem in elem_keysymgroup:
                                eSymbolsGroup = etree.SubElement(eKeySymGroup, 'symbolsgroup')
                                for elem2 in elem.getChildren():
                                    for elem3 in elem2.getChildren():
                                        eSymbol = etree.SubElement(eSymbolsGroup, 'symbol')
                                        eSymbol.text = elem3.getText()    
                            if len(elem_keysyms) > 0:
                                if len(elem_keysyms) == 1:
                                    ksname = elem_keysyms[0].getChild(0).getText()
                                    eKeySyms = etree.SubElement(eKeySymGroup, 'typegroup', value=ksname[1:-1])
                                else:
                                    """ We are probably processing level3; we keep first item """
                                    ksname = elem_keysyms[0].getChild(0).getText()
                                    eKeySyms = etree.SubElement(eKeySymGroup, 'typegroup', value=ksname[1:-1])
                                    #print "Possibly processing level3"
                        if len(self.getChildrenListByType(keyset, ELEM_VIRTUALMODS)):
                            for vmods in elem_virtualmods:
                                etree.SubElement(eKeySymGroup, 'tokenvirtualmodifiers', value=vmods.getChild(0).getText())
                        if len(self.getChildrenListByType(keyset, OVERLAY)):
                            for elem in elem_overlay:
                                for elem2 in self.getChildrenByType(elem, KEYCODEX):
                                    pass
                else:
                    return { "success": False }
                    
        extraction_result = ExtractVariantsKeycodes(xml_layout, variantname)
        return { 'success': True, 
                 'all_variants': variants,
                 'variants': extraction_result['variants'], 
                 'layout': xml_layout, 
                 'keydict': extraction_result['keydict']
             }
Example #19
def load_from_file(filename):
    """
    Parse an FCL file into a fuzzy.systems (Mamdani, Sugeno or Tsukamoto) instance.
    """
    return __load(antlr3.ANTLRFileStream(filename))
Example #20
def parseFile(fileandvariant="/usr/share/X11/xkb/keycodes/xfree86|xfree86",
              *morefilesandvariants):
    keycodedb = {}
    for eachfileandvariant in (fileandvariant, ) + morefilesandvariants:
        filename, pipe, variant = eachfileandvariant.partition('|')

        try:
            file = open(filename, 'r')
        except OSError:
            print "Could not open file ", filename, " Aborting..."
            sys.exit(-1)
        file.close()

        char_stream = antlr3.ANTLRFileStream(filename)
        lexer = KeycodesLexer(char_stream)
        tokens = antlr3.CommonTokenStream(lexer)
        parser = KeycodesParser(tokens)

        result = parser.keycodedoc()

        nodes = antlr3.tree.CommonTreeNodeStream(result.tree)
        nodes.setTokenStream(tokens)
        walker = KeycodesWalker(nodes)
        # walker.keycodedoc()

        keycodeidinclude = [variant]

        for itemKeycodeDoc in result.tree.getChildren():
            copying = False
            listType = getChildrenListByType(itemKeycodeDoc, KEYCODELISTTYPE)
            material = getChildrenListByType(itemKeycodeDoc, KEYCODEMATERIAL)
            if len(listType) != 1:
                print "Requires single node for KEYCODELISTTYPE. Found", len(
                    listType)
                sys.exit(-1)
            if len(material) != 1:
                print "Requires single node for KEYCODEMATERIAL. Found", len(
                    material)
                sys.exit(-1)

            for listNameGroup in getChildrenListByType(listType[0],
                                                       KEYCODELISTNAME):
                for listName in listNameGroup.getChildren():
                    if listName.getText()[1:-1] == variant or listName.getText(
                    )[1:-1] in keycodeidinclude:
                        copying = True

            if not copying:
                break

            for materialIncludeGroup in getChildrenListByType(
                    material[0], INCLUDE):
                for includeName in materialIncludeGroup.getChildren():
                    includeKeycodelist = re.findall(
                        r'(\w+)\((\w+)\)',
                        includeName.getText()[1:-1])
                    if includeKeycodelist[0][1] not in keycodeidinclude:
                        keycodeidinclude.append(includeKeycodelist[0][1])

            for keycode in getChildrenListByType(material[0], KEYCODE):
                keycodedb[keycode.getChild(0).getText()] = keycode.getChild(
                    1).getText()

            for alias in getChildrenListByType(material[0], ALIAS):
                keycodedb[alias.getChild(0).getText()] = keycodedb[
                    alias.getChild(1).getText()]

            for indicator in getChildrenListByType(material[0], INDICATOR):
                pass

    return keycodedb
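A minimal sketch of calling parseFile (the evdev path is an assumption; the "file|variant" convention comes from the default argument above):

keycodedb = parseFile("/usr/share/X11/xkb/keycodes/evdev|evdev")
# keycodedb maps each keycode (and alias) name to its value for the requested variant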
xkbfilename = "gr"
if len(sys.argv) > 1:
    xkbfilename = sys.argv[1]

# print "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX", sys.argv[1]

try:
    xkbfile = open(xkbfilename, 'r')
except OSError:
    print "Could not open file ", xkbfilename, ". Aborting..."
    sys.exit(-1)

xkbfile.close()

char_stream = antlr3.ANTLRFileStream(xkbfilename, encoding='utf-8')
lexer = XKBGrammarLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = XKBGrammarParser(tokens)

result = parser.layout()

# print "tree =", result.tree.toStringTree()

nodes = antlr3.tree.CommonTreeNodeStream(result.tree)
nodes.setTokenStream(tokens)
walker = XKBGrammarWalker(nodes)
# walker.layout()

layout = etree.Element('layout')
Example #22
    def parse(self, xkbfilename):
        char_stream = antlr3.ANTLRFileStream(xkbfilename, encoding='utf-8')
        lexer = XKBGrammarLexer(char_stream)
        tokens = antlr3.CommonTokenStream(lexer)
        parser = XKBGrammarParser(tokens)

        result = parser.layout()

        # print "tree =", result.tree.toStringTree()

        nodes = antlr3.tree.CommonTreeNodeStream(result.tree)
        nodes.setTokenStream(tokens)
        walker = XKBGrammarWalker(nodes)
        # walker.layout()

        layout = etree.Element('layout')

        doc = etree.ElementTree(layout)

        layout.attrib['layoutname'] = os.path.basename(xkbfilename)

        #print "Processing", os.path.basename(xkbfilename), "...",
        for symbols in result.tree.getChildren():
            eSymbol = etree.SubElement(layout, 'symbols')
            for mapobject in symbols.getChildren():
                if mapobject.getType() == MAPTYPE:
                    for maptypesect in mapobject.getChildren():
                        if maptypesect.getType() == MAPOPTIONS:
                            for mapoption in maptypesect.getChildren():
                                if mapoption.getText() == 'xkb_symbols' or mapoption.getText() == 'hidden':
                                    eMapOption = etree.SubElement(eSymbol, 'mapoption')
                                    eMapOption.text = mapoption.getText()
                        elif maptypesect.getType() == MAPNAME:
                            if maptypesect.getChildCount() == 1:
                                eMapName = etree.SubElement(eSymbol, 'mapname')
                                eMapName.text = maptypesect.getChildren()[0].getText()[1:-1]
                            else:
                                print "\t\t\tInternal error in mapoption"
                        else:
                            print "\t\tInternal error in maptypesect"
                            sys.exit(-2)
                elif mapobject.getType() == MAPMATERIAL:
                    eMapMaterial = etree.SubElement(eSymbol, 'mapmaterial')
                    for name in self.getChildrenByType(mapobject, TOKEN_NAME):
                        nameText = name.getChild(0).getText()[1:-1]
                        eTokenName = etree.SubElement(eMapMaterial, 'tokenname', name=nameText ) 
                    for include in self.getChildrenByType(mapobject, TOKEN_INCLUDE):
                        eInclude = etree.SubElement(eMapMaterial, 'tokeninclude')
                        eInclude.text = include.getChild(0).getText()[1:-1]
                    for keytype in self.getChildrenByType(mapobject, TOKEN_KEY_TYPE):
                        keytypeText = keytype.getChild(0).getText()
                        eKeyType = etree.SubElement(eMapMaterial, 'tokentype')
                        eKeyType.text = keytypeText[1:-1]
                    for modmap in self.getChildrenByType(mapobject, TOKEN_MODIFIER_MAP):
                        eModMap = etree.SubElement(eMapMaterial, 'tokenmodifiermap', state=modmap.getChild(0).getText())
                        for modstate in self.getChildrenByTypes(modmap, KEYCODE, KEYCODEX):
                            if modstate.getType() == KEYCODE:
                                  eModState = etree.SubElement(eModMap, "keycode", value=modstate.getChild(0).getText())
                            elif modstate.getType() == KEYCODEX:
                                eModState = etree.SubElement(eModMap, "keycodex", value=modstate.getChild(0).getText())
                            else:
                                print "Unexpected token encountered. Aborting...", modstate.getText()
                                sys.exit(-1)
                    allkeysymgroups = {}
                    for keyset in self.getChildrenByType(mapobject, TOKEN_KEY):
                        keycodex = keyset.getChild(0) if str(keyset.getChild(0)) == "keycodex" else keyset.getChild(1)
                        allkeysymgroups[keycodex.getChild(0).getText()] = keyset
                    sortedkeysymgroups = self.sortDict(allkeysymgroups, KeycodesReader.compare_keycode)
                    for keyset in sortedkeysymgroups:
                        elem_keysymgroup = self.getChildrenByType(keyset, ELEM_KEYSYMGROUP)
                        elem_virtualmods = self.getChildrenByType(keyset, ELEM_VIRTUALMODS)
                        elem_overlay = self.getChildrenByType(keyset, OVERLAY)
                        override = self.getChildrenListByType(keyset, OVERRIDE)
                        eTokenKey = etree.SubElement(eMapMaterial, 'tokenkey')
                        eKeyCodeName = etree.SubElement(eTokenKey, 'keycodename')
                        keycodex = self.getChildrenListByType(keyset, KEYCODEX)
                        if len(keycodex) == 1:
                            eKeyCodeName.text = keycodex[0].getChild(0).getText()
                        else:
                            print "Could not retrieve keycode name"
                            exit(-1)
                        if len(override) == 1:
                            eTokenKey.attrib['override'] = "True"
                        else:
                            eTokenKey.attrib['override'] = "False"
                        if len(self.getChildrenListByType(keyset, ELEM_KEYSYMGROUP)):
                            elem_keysyms = self.getChildrenListByType(keyset, ELEM_KEYSYMS)
                            eKeySymGroup = etree.SubElement(eTokenKey, 'keysymgroup')
                            keysymgroup_counter = len(self.getChildrenListByType(keyset, ELEM_KEYSYMGROUP))
                            for elem in elem_keysymgroup:
                                eSymbolsGroup = etree.SubElement(eKeySymGroup, 'symbolsgroup')
                                for elem2 in elem.getChildren():
                                    for elem3 in elem2.getChildren():
                                        eSymbol = etree.SubElement(eSymbolsGroup, 'symbol')
                                        eSymbol.text = elem3.getText()    
                            if len(elem_keysyms) > 0:
                                if len(elem_keysyms) == 1:
                                    ksname = elem_keysyms[0].getChild(0).getText()
                                    eKeySyms = etree.SubElement(eKeySymGroup, 'typegroup', value=ksname[1:-1])
                                else:
                                    """ We are probably processing level3; we keep first item """
                                    ksname = elem_keysyms[0].getChild(0).getText()
                                    eKeySyms = etree.SubElement(eKeySymGroup, 'typegroup', value=ksname[1:-1])
                                    #print "Possibly processing level3"
                        if len(self.getChildrenListByType(keyset, ELEM_VIRTUALMODS)):
                            for vmods in elem_virtualmods:
                                etree.SubElement(eKeySymGroup, 'tokenvirtualmodifiers', value=vmods.getChild(0).getText())
                        if len(self.getChildrenListByType(keyset, OVERLAY)):
                            for elem in elem_overlay:
                                for elem2 in self.getChildrenByType(elem, KEYCODEX):
                                    pass
                else:
                    print "\tInternal error at map level,", mapobject.getText()
                    # sys.exit(-2)
                    
        return layout
Example #23
import antlr3
from psfLexer import psfLexer
from psfParser import psfParser

char_stream = antlr3.ANTLRFileStream('footballwives.txt')

lexer = psfLexer(char_stream)
tokens = antlr3.CommonTokenStream(lexer)
parser = psfParser(tokens)
p = parser.annotate()


def walk(node, indent=0):
    print "{indent} {info}".format(
        indent=' ' * (indent * 4),
        info=node.token if node.token else repr(node),
        #s=node.token.text if node.token else ''
    )
    if node.children:
        for c in node.children:
            walk(c, indent=indent + 1)


walk(p.tree)