Beispiel #1
0
def parse(string):
    """Tokenize and parse *string* with the module's PLY grammar, then
    return the concatenation of every rule's payload (element [1]) in
    rule order.

    Returns an empty list when the grammar yields no rules.
    """
    lex.lex()
    yacc.yacc()
    rules = yacc.parse(string)

    # The original popped items off the front of the list (O(n^2));
    # iterating directly preserves order at O(n).  `rules or []` keeps
    # the original behaviour of returning [] when parse() yields None.
    result = []
    for current in rules or []:
        result.extend(current[1])
    return result
Beispiel #2
0
 def build(self, **kwdargs):
     """Construct the three PLY parsers used by this grammar module.

     Each parser shares the same rule module and logger but anchors at a
     different start symbol: full programs, operation commands, and
     parameter lists.  Extra keyword arguments are forwarded to yacc.
     """
     shared = dict(module=self, logger=self.logger, **kwdargs)
     self.parser = yacc.yacc(start='program', **shared)
     self.ope_parser = yacc.yacc(start='opecmd', **shared)
     self.param_parser = yacc.yacc(start='param_list', **shared)
Beispiel #3
0
    def __init__(self, options, stddef_types=True, gnu_types=True):
        """Set up the preprocessor, the LALR C-grammar parser and the lexer.

        ``stddef_types``/``gnu_types`` control which built-in type names the
        lexer recognises; on win32 the MSVC ``__int64`` type is always added.
        """
        self.preprocessor_parser = preprocessor.PreprocessorParser(options,self)
        self.parser = yacc.Parser()
        prototype = yacc.yacc(method='LALR',
                              debug=False,
                              module=cgrammar,
                              write_tables=True,
                              outputdir=os.path.dirname(__file__),
                              optimize=True)

        # When yacc loads cached tables it does not pick up the error
        # handler from the grammar module, so attach it explicitly.
        prototype.errorfunc = cgrammar.p_error
        prototype.init_parser(self.parser)
        self.parser.cparser = self

        self.lexer = CLexer(self)
        builtin_names = []
        if stddef_types:
            builtin_names += ['wchar_t', 'ptrdiff_t', 'size_t']
        if gnu_types:
            builtin_names.append('__builtin_va_list')
        if sys.platform == 'win32':
            builtin_names.append('__int64')
        for type_name in builtin_names:
            self.lexer.type_names.add(type_name)
Beispiel #4
0
def AnalizadorParser(Archivo):
    # Parse the Setlan source file named by `Archivo` using this module's
    # PLY grammar.  Returns 0 on success; on IOError it prints a Spanish
    # error message and exits the interpreter.
    # NOTE: Python 2 code (print statement).

    try:

        tokens = lexer.tokens
        #tokensEncontrados = Lexer.AnalizadorLex(ArchivoSetlantxt)

        # Open the input file
        ArchivoSetlan = open(Archivo, 'r')

        global data

        # Store the file contents in the module-level `data` global
        data = ArchivoSetlan.read()

        # Build the parser
        parser = yacc.yacc()

        parser.parse(data)

        # Close the file
        ArchivoSetlan.close()

        return 0

    except IOError:

        print "ERROR: No se pudo abrir el ArchivoSetlan %s" % Archivo
        exit()
Beispiel #5
0
def procgetparser(procparse, runpass=1, parsetab_name="parsetab", arch=None):
    """Instantiate a CParse-style parser class and build its PLY parser.

    Returns a ``(parser, yaccer)`` pair.  Depending on
    ``config["cparse_version"]`` this uses either PLY 2.5 (LALR, with a
    shared pickled parse table) or the legacy yacc module (SLR, no table
    cache).
    """
    parser = procparse(runpass=runpass)

    # set_arch() is optional on parser classes -- best effort only.
    # Bug fix: narrowed from a bare `except:` so SystemExit and
    # KeyboardInterrupt still propagate.
    try:
        parser.set_arch(arch)
    except Exception:
        pass

    if config["cparse_version"] == "2":
        import yacc2  ##Use PLY 2.5 now we can use LALR
        ##Get existing parsetable for optimisation
        global existing_parsetables
        yaccer = yacc2.yacc(module=parser,
                            debug=0,
                            write_tables=1,
                            optimize=1,
                            method="LALR",
                            tabmodule="%s%s" % (TABLE_PREFIX, parsetab_name),
                            pass_the_pickle=existing_parsetables)
        ##Save the unpickled parse table to the canvasengine so as other parses can grab it in future
        existing_parsetables = yaccer.pass_the_pickle
    else:
        import yacc
        yaccer = yacc.yacc(module=parser,
                           debug=0,
                           method="SLR",
                           write_tables=0)

    return parser, yaccer
Beispiel #6
0
    def __init__(self):
        """Build the PLY lexer/parser pair and reset assembler state."""
        self.lexer = lex.lex()

        # init_assembler_variables()
        self.yaccer = yacc.yacc()

        # per-run assembler state
        self.code = ''
        self.tokens = []
Beispiel #7
0
def parse(sidl_file, debug=False):
    """
    Parse the .sidl file and return the object-oriented intermediate
    representation.

    \param sidl_file   the name of the input file
    \param debug       turn on debugging output
    """
    # Record the file name in a module-level global (used by error
    # reporting elsewhere in the module).
    global sidlFile
    sidlFile = sidl_file

    if not os.path.isfile(sidlFile):
        print "**ERROR: %s does not exist." % sidlFile
        exit(1)

    # Disable PLY table caching while debugging so the grammar is
    # regenerated and re-checked on every run.
    optimize = not debug

    #lex.lex(debug=debug,optimize=optimize)
    #lex.runmain()

    parser = yacc.yacc(debug=debug, optimize=optimize)

    if debug == 1:
        logging.basicConfig(filename='parser.log',
                            level=logging.DEBUG,
                            filemode="w",
                            format="%(filename)10s:%(lineno)4d:%(message)s")
        log = logging.getLogger()
        # PLY accepts a logger object as the `debug` argument to parse().
        debug = log

    #import pdb; pdb.set_trace()
    # NOTE(review): this passes the file *name* string to parse(), not the
    # file contents -- presumably `scanner` opens the file itself; confirm.
    sexp = parser.parse(sidlFile, lexer=scanner, debug=debug)
    check_grammar(sexp)
    return sexp
Beispiel #8
0
    def __init__(self, options):
        """Build the preprocessor, the LALR C parser and the lexer.

        Option flags (``no_stddef_types``, ``no_gnu_types``,
        ``no_python_types``) suppress registration of the corresponding
        built-in type names with the lexer.
        """
        self.preprocessor_parser = preprocessor.PreprocessorParser(
            options, self)
        self.parser = yacc.Parser()
        prototype = yacc.yacc(method='LALR',
                              debug=False,
                              module=cgrammar,
                              write_tables=True,
                              outputdir=os.path.dirname(__file__),
                              optimize=True)

        # yacc does not restore the error handler when reading cached
        # tables, so wire it up by hand before initialising the parser.
        prototype.errorfunc = cgrammar.p_error
        prototype.init_parser(self.parser)
        self.parser.cparser = self

        self.lexer = CLexer(self)
        register = self.lexer.type_names.add
        if not options.no_stddef_types:
            register('wchar_t')
            register('ptrdiff_t')
            register('size_t')
        if not options.no_gnu_types:
            register('__builtin_va_list')
        if sys.platform == 'win32' and not options.no_python_types:
            register('__int64')
Beispiel #9
0
    def __init__(self, underscores, options):
        """Initialise the lexer/parser/printer triple and all bookkeeping
        state for program collection."""
        # lexer, parser and pretty-printer
        self.options = options
        self.lexer = Lexer(underscores, options)
        self.tokens = self.lexer.tokens
        #self.parser = yacc.yacc(module=self,debug=False)
        self.parser = yacc.yacc(module=self)
        self.printer = printer.Printer()

        # programs[name][type] is a Program
        self.p_statements = 0
        self.list = []
        self.programs = {
            name: dict()
            for name in (BASE, GENERATE, SPEC, PREFP, HEURISTIC, APPROX, UNSATP)
        }

        # the base program statement
        self.base = ast.ProgramStatement()
        self.base.name = BASE
        self.base.type = EMPTY

        # miscellaneous bookkeeping
        self.constants = []
        self.included = []
        self.error = False
        self.position = None
        self.element = None
        self.filename = None
        self.program = None

        # clingo optimize statements
        self.preference_statement = False
        self.clingo_statement = False
Beispiel #10
0
def re2ast(s):
    """Convert the regular expression in string *s* into an abstract
    syntax tree.

    Returns a triple ``(ast, node_list, edge_list)`` taken from the
    parse result's ``'ast'`` and ``'dig'`` entries.
    """
    scanner = lex()
    tree = yacc().parse(s, lexer=scanner)
    dig = tree['dig']
    return (tree['ast'], dig['nl'], dig['el'])
Beispiel #11
0
def parse(s):
    """Lex and parse *s*; returns the root of the resulting parse tree."""
    scanner = lex.lex()
    scanner.input(s)
    tree_builder = yacc.yacc()  # pass debug=1 to troubleshoot the grammar
    print("Parsing...")
    return tree_builder.parse(lexer=scanner)
Beispiel #12
0
def parse(s):
    """Build a lexer/parser pair, feed it *s* and return the parse root."""
    scan = lex.lex()
    scan.input(s)
    grammar = yacc.yacc()  # enable debug=1 here when diagnosing conflicts
    print("Parsing...")
    root = grammar.parse(lexer=scan)
    return root
Beispiel #13
0
def parse(token_list):
    """Run the CM parser over *token_list* and return the populated AST.

    The result is also published through the module-level ``ast`` global.
    """
    global ast
    cm_lexer = CMLex(token_list)
    cm_parser = yacc.yacc(debug=False,
                          write_tables=False,
                          errorlog=yacc.NullLogger())
    ast = AST()
    cm_parser.parse(lexer=cm_lexer)
    return ast
Beispiel #14
0
def main():
    """Read the file named on the command line, parse its tokens and
    write the resulting AST back out via ``write_output``."""
    filename = sys.argv[1]
    lexed = read_input(filename)
    token_tuples = get_tokens(lexed)

    # Renamed from `lex` to avoid shadowing the PLY `lex` module.
    token_lexer = Lexer(token_tuples)
    parser = yacc.yacc()
    # Bug fix: parse with the parser we just built instead of relying on
    # yacc's implicit "last parser created" module-level state (the local
    # `parser` was previously unused).
    ast = parser.parse(lexer=token_lexer)
    write_output(filename, ast)
Beispiel #15
0
    def __init__(self, stddef_types=True, gnu_types=True, cache_headers=True):
        """Build the preprocessor, C parser and lexer.

        ``stddef_types``/``gnu_types`` seed the lexer with the standard
        and GNU built-in type names; ``cache_headers`` enables the parsed
        header cache.
        """
        self.preprocessor_parser = CPreprocessorParser(self)
        self.parser = yacc.Parser()
        yacc.yacc(method='LALR').init_parser(self.parser)
        self.parser.cparser = self

        self.lexer = CLexer(self)
        builtin_names = []
        if stddef_types:
            builtin_names += ['wchar_t', 'ptrdiff_t', 'size_t']
        if gnu_types:
            builtin_names.append('__builtin_va_list')
        if sys.platform == 'win32':
            builtin_names.append('__int64')
        for type_name in builtin_names:
            self.lexer.type_names.add(type_name)

        self.header_cache = {}
        self.cache_headers = cache_headers
        self.load_header_cache()
Beispiel #16
0
    def __init__(self, stddef_types=True, gnu_types=True, cache_headers=True):
        """Set up preprocessing, parsing and lexing for C translation
        units, registering the requested built-in type names and
        initialising the header cache."""
        self.preprocessor_parser = CPreprocessorParser(self)
        self.parser = yacc.Parser()
        yacc.yacc(method='LALR').init_parser(self.parser)
        self.parser.cparser = self

        self.lexer = CLexer(self)
        add_type = self.lexer.type_names.add
        if stddef_types:
            add_type('wchar_t')
            add_type('ptrdiff_t')
            add_type('size_t')
        if gnu_types:
            add_type('__builtin_va_list')
        if sys.platform == 'win32':
            add_type('__int64')

        self.header_cache = {}
        self.cache_headers = cache_headers
        self.load_header_cache()
Beispiel #17
0
def doxyyacc(elist=elem_list(), mylexer=doxylex.doxylex()):
    """Build a PLY parser for doxygen-style comment blocks.

    Returns ``(parser, elist.cm)``.  KEY/expression statements are pushed
    onto *elist* as ``[key-without-prefix, text]`` pairs.

    NOTE(review): both defaults are mutable objects evaluated once at
    definition time, so repeated no-argument calls share the same
    `elist`/`mylexer` and accumulate state -- confirm this is intended.
    """
    def p_translation_unit(p):
        '''translation_unit : statement
                        | translation_unit statement'''
        pass

    def p_statement_assign(p):
        'statement : KEY expression'
        updatestr(p)
        elist.push([p[1][1:], p[2]])
        if DEBUG_DOXYYACC==1: print(get_cur_info()[0],p[0])
        pass

    def p_statement_line(p):
        'statement : expression'
        if p[1] == '':
            # empty expression: nothing to record
            pass
        else:
            # non-blank line: keep its text
            updatestr(p)
            if DEBUG_DOXYYACC==1: print(get_cur_info()[0],p[0])

    def p_expression_com(p):
        'expression : expression WORD NEWLINE'
        #updatestr(p, 1)
        p[0] = ''
        p[0] = p[1] + '\n' + p[2]
        if DEBUG_DOXYYACC==1: print(get_cur_info()[0],p[0])
        pass

    def p_expression_line(p):
        'expression : WORD NEWLINE'
        updatestr(p, 1)
        if DEBUG_DOXYYACC==1: print(get_cur_info()[0],p[0])
        pass

    def p_expression_blankline(p):
        '''expression : BLANKLINE
                       | NEWLINE'''
        p[0] = ''
        if DEBUG_DOXYYACC==1: print(get_cur_info()[0],p[0])

    def p_empty(p):
        'empty : '
        pass

    def p_error(p):
        print("syntax error at '%s %d'" % (p.value, p.lineno))

    # Expose the lexer and token list where PLY's frame inspection finds them.
    lexer = mylexer.lexer
    tokens = mylexer.tokens
    p1=yacc.yacc(method='LALR', tabmodule='doxy_tab', debugfile='doxyparser.out')
    return p1, elist.cm
Beispiel #18
0
    def __init__(self, **kwargs):
        """Build the lexer and parser from this object's rules, keeping the
        grammar object around for production-name introspection."""
        self.names = {}
        self.debug = kwargs.get('debug', 0)

        # Build the lexer and parser
        lex.lex(module=self, debug=self.debug)
        built = yacc.yacc(module=self,
                          debug=self.debug,
                          tabmodule='_parsetab',
                          with_grammar=True)
        self.grammar = built[1]

        self.prodnames = self.grammar.Prodnames
Beispiel #19
0
def create_globals(module, support, debug):
    """(Re)initialise the module-level parser, lexer, module and
    support-macro globals used by the parsing entry points."""
    global parser, lexer, m, spt
    if not parser:
        lexer = lex.lex()
        parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)

    m = module if module is not None else refpolicy.Module()
    spt = support if support else refpolicy.SupportMacros()
Beispiel #20
0
def create_globals(module, support, debug):
    """Lazily build the shared lexer/parser pair and refresh the
    module-level refpolicy state (`m`, `spt`) from the arguments."""
    global parser, lexer, m, spt
    if not parser:
        lexer = lex.lex()
        parser = yacc.yacc(method="LALR", debug=debug, write_tables=0)

    if module is None:
        m = refpolicy.Module()
    else:
        m = module

    if support:
        spt = support
    else:
        spt = refpolicy.SupportMacros()
Beispiel #21
0
def main():
    """Interactive Pluto REPL: evaluate commands until the user types 'q'."""
    lexer = lex.lex()
    parser = yacc.yacc()
    while True:
        input_command = input("Pluto 1.0>>>")
        if input_command == 'q':
            print('Bye for now')
            break
        if input_command:
            print(parser.parse(input_command))
    def __init__(self, language='en'):
        """Build this grammar's lexer and parser, deriving the PLY debug
        and parse-table file names from the module and class names."""
        self.language = language
        self.lock = Lock()

        try:
            stem = os.path.splitext(__file__)[0]
            modname = os.path.split(stem)[1] + "_" + self.__class__.__name__
        except:
            # __file__ may be missing (e.g. frozen environments)
            modname = "parser" + "_" + self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"

        lex.lex(module=self, debug=False)
        self.p = yacc.yacc(module=self, debug=0, outputdir=outputdir,
                           debugfile=self.debugfile,
                           tabmodule=self.tabmodule)
Beispiel #23
0
def re2nfa(s, stno=0):
    """Translate the regular expression *s* into a language-equivalent NFA.

    *stno* optionally seeds the state numbering (default 0).
    """
    # Restart state numbering from scratch; NxtStateStr() (see
    # StateNameSanitizers.py) draws from this counter on demand.
    ResetStNum()

    scanner = lex()

    # Build the parser and hand it our lexer.
    nfa_builder = yacc()
    nfa = nfa_builder.parse(s, lexer=scanner)
    #-- for debugging : return dotObj_nfa(nfa, nfaname)
    return nfa
Beispiel #24
0
def beginParse(program):
    # Parse and evaluate a melon program read from the `program` file
    # object, printing the result.  Booleans are printed lowercased;
    # LISTA nodes are rendered via recorrer_list.  Syntax errors are
    # reported in Spanish with the offending token's position.
    # NOTE: Python 2 code (print statement, `except X, e` syntax).
    yacc = lexyacc.yacc()
    try:
        result = yacc.parse(program.read(),lexer = lexmelon.lex())
        aux = eval({},result)
        if isinstance(aux,bool):
            aux = str(aux).lower()
        if isinstance(aux,NodoBin):
            if aux.tipo == 'LISTA':
                print  recorrer_list(aux)
        else:
            print aux
    except SyntaxError, e:
        token = e.token
        if token:
            print 'Error de sintaxis en linea ' + str(token.lineno) \
                + ' cerca de token ' + '"' + str(token.value) + '"'
        else:
            print 'Error al final del programa'
Beispiel #25
0
    def __init__(self, language='en'):
        """Initialise the grammar object: build its lexer/parser pair and
        give the PLY debug/table files per-class names."""
        self.language = language
        self.lock = Lock()

        try:
            base = os.path.splitext(__file__)[0]
            modname = os.path.split(base)[1] + "_" + self.__class__.__name__
        except:
            # fall back when __file__ is unavailable
            modname = "parser" + "_" + self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"

        lex.lex(module=self, debug=False)
        self.p = yacc.yacc(
            module=self,
            debug=0,
            outputdir=outputdir,
            debugfile=self.debugfile,
            tabmodule=self.tabmodule,
        )
Beispiel #26
0
def compile_to_IL2(data, vars, remoteresolver, imported, debug=0):
    # Compile C source `data` to IL using the CParse2 front end.
    # When `debug` is set, the raw source and variable table are dumped
    # to files for inspection.
    # NOTE: Python 2 code (`except X, err`, types.InstanceType).
    #print "<<<<<<<<<<<<<<<<<<<<<<< USING CPARSE 2 FOR COMPILATION of : >>>>>>>>>>>>>>>>>>>>>>>>>>>>"
    assert type(remoteresolver) == types.InstanceType
    #devlog("cparse2", "compile_to_IL data: %s"%data)
    dumpfile(debug, data, "out.E", rand=True)
    dumpfile(debug, arraydump(vars), "out.c_vars")

    ##Initialise our lexer logic and lex, as well as our parser logic
    parser = cparse2.CParse2(remoteresolver=remoteresolver,
                             vars=vars,
                             imported=imported)

    # SLR parser built over the CParse2 rule object; tables are cached
    # under the resolver-specific module name.
    yaccer = yacc.yacc(module=parser,
                       debug=0,
                       write_tables=1,
                       method="SLR",
                       tabmodule=remoteresolver.parse_table_name)

    try:
        ret_IL = yaccer.parse(data, lexer=parser.lexer)
    except Exception, err:
        raise
Beispiel #27
0
def constr_testing(value, constr, var_name):
    """Bind each variable's concrete byte values (as hex strings) into the
    module-level ``names`` table, then parse the path-constraint
    expression and return ``([constr[0]], parsed-constraint)``."""
    global names

    lexer = lex.lex()
    parser = yacc.yacc()
    # print parser.parse('ASSERT(NOT(123 = 123))')

    for index, eachvar in enumerate(var_name):
        hex_values = []
        for val in value[index]:
            if val == '':
                continue
            # TODO: input concrete value must be integer
            bits = BitArray(uint=int(val), length=8)
            hex_values.append('0x' + bits.hex)
        names[eachvar] = hex_values

    return ([constr[0]], yacc.parse(constr[1]))
Beispiel #28
0
    def parse(self, sentence):
        '''
        Perform the actual parsing (lex/yacc); defines and uses the grammar
        for propositional and first-order logic.

        Builds a parse tree and returns its root: the operator with the
        lowest precedence in *sentence*.  Raises LogicError (from p_error)
        on malformed input.
        '''

        # build the lexer

        # eclipse may warn you that this is an unused variable, but lex will use it
        tokens = (
                  'LBRACE', 'RBRACE', 'AND', 'OR', 'IMPLIES', 'NOT', 'ALL', 'EXISTS',
                  'PREDICATE', 'BINDING'
                  )

        #### DEFINE EVERY TOKEN MATCH AS A FUNCTION ###
        # this has the important side-effect of being able
        # to strictly control order of lexical matching

        def t_LBRACE (t): 
            r'\{'
            return t
        def t_RBRACE (t): 
            r'\}'
            return t
        def t_PREDICATE(t): 
            '\[[^\]]+\]'
            return t

        def t_AND(t):
            r'and'
            return t
        def t_OR(t):
            r'or'
            return t
        def t_IMPLIES(t):
            r'implies|therefore'
            return t

        def t_NOT(t):
            r'not'
            return t

        def t_ALL(t):
            r'all'
            return t

        def t_EXISTS(t):
            r'exists'
            return t

        def t_BINDING(t):
            r'[a-zA-Z_][a-zA-Z_0-9]*'
            return t

        # eclipse will warn you that this is an unused variable, but yacc/lex will use it
        precedence = (
                      ('left', 'IMPLIES'),
                      ('left', 'OR', 'AND'),
                      ('left', 'EXISTS', 'ALL'),
                      ('left', 'NOT'),
                      ('left', 'PREDICATE')
                      )

        # Build the lexer
        # again, eclipse will warn about this being an unused variable, but yacc will use it
        lexer = lex.lex()


        def p_expression_not(p):
            'expression : NOT expression'
            p[0] = Negation(p[2])

        def p_expression_forall_pred(p):
            'expression : ALL BINDING expression'
            p[0] = All(self, str(p[2]), self.domain, p[3])
            # bound-variable predicates are no longer free-standing facts,
            # so drop them from the predicate registry
            preds = p[0].getBoundVariablePreds()
            for x in preds:
                if(x.getFullStatement() in self.predicates.keys()):
                    del self.predicates[x.getFullStatement()]

        def p_expression_exists_pred(p):
            'expression : EXISTS BINDING expression'
            p[0] = Exists(self, str(p[2]), self.domain, p[3])
            # same cleanup as in the ALL rule above
            preds = p[0].getBoundVariablePreds()
            for x in preds:
                if(x.getFullStatement() in self.predicates.keys()):
                    del self.predicates[x.getFullStatement()]

        def p_expression_implies(p):
            'expression : expression IMPLIES expression'
            p[0] = Implication(p[1], p[3])


        def p_expression_and(p):
            'expression : expression AND expression'
            p[0] = Conjunction(p[1], p[3])

        def p_expression_or(p):
            'expression : expression OR expression'
            p[0] = Disjunction(p[1], p[3])

        def p_expression_group(p):
            'expression : LBRACE expression RBRACE'
            p[0] = p[2]

#        def p_expression_term(p):
#            'expression : predicate'
#            p[0] = p[1]

        def p_predicate(p):
            'expression : PREDICATE'

            # strip the surrounding [ ] from the matched token
            stringified = str(p[1])[1:len(str(p[1])) - 1]

            # reuse an existing Predicate object for the same text so
            # repeated mentions share truth state
            if(stringified in self.predicates):
                p[0] = self.predicates[stringified]
            else:
                p[0] = Predicate(stringified, self.domain)
                self.predicates[stringified] = p[0]
                if(stringified in self.predicateAssumptions):
                    p[0].truth = True

        # Error rule for syntax errors
        def p_error(p):
            # get the position
            if p is None: return
            pos = p.lexpos
            # get the nth line
            lines = p.lexer.lexdata.split("\n");
            inp = lines[p.lexer.lineno - 1]

            hint = "Unexpected operator or predicate not wrapped in []"

            if(inp[pos - 1] == '['):
                hint = "no matching closing bracket ']' after this point"
            if(inp[pos - 1] == '{'):
                hint = "no matching closing brace '}' after this point"
            if(inp[pos] == '}'):
                hint = "no matching opening brace '{' before this point"
            if(inp[pos] == ']'):
                hint = "no matching opening bracket '[' before this point"

            raise LogicError("Syntax error at line " + str(p.lexer.lineno) + ", character " + str(pos) + ":\n" + inp + "\n" + (pos * " ") + "^ " + hint);

        # positional arguments: method="LALR", debug=0
        parser = yacc.yacc("LALR",0)

        parseTree = parser.parse(sentence)

        # return the outer-most (lowest priority) operator in this sentence -- it serves as the
        # root to the parse tree
        return parseTree
Beispiel #29
0
import lexer_rules
import parser_rules
import lex
import yacc

# Build the lexer and parser from their rule modules, then evaluate a
# sample arithmetic expression and print the resulting AST.
lexer = lex.lex(module=lexer_rules)
parser = yacc.yacc(module=parser_rules)

text = "(14 - 6) / 2"
ast = parser.parse(text, lexer)
print(ast)
Beispiel #30
0
	PSaltos.append(len(listQuadruple))

def p_seen_CP_LOOPIF(p):
	"""
	seen_CP_LOOPIF :
	"""
	# Embedded action: emit a conditional GOTO quadruple for a LOOPIF
	# condition and remember its position for later backpatching.
	aux = PilaO.pop()
	# Bug fix: compare with ==, not `is` -- identity comparison against
	# an int literal only works by accident of CPython small-int caching.
	if aux[1] == 0:
		createGoToQuadruple(20, aux[0], None, None)
		PSaltos.append(len(listQuadruple)-1)
	else:
		print ("BoxesSemanticError: Error in LOOPIF statement. In line: " + str(p.lineno(1)))

def p_error(t):
    # PLY error hook: report the offending token's line and abort.
    # NOTE: Python 2 print statement.
    print "BoxesParserError: Error, lineno: ", t.lineno
    exit(1)

import profile

# Build the grammar
yacc.yacc(method='LALR')
#READ CODE
with open(sys.argv[1],'r') as content_file:
	sourceCode = content_file.read()


# Parsing fills the module-level tables (MetDic, listQuadruple, ConDic)
# as side effects of the grammar actions; dump them as JSON afterwards.
yacc.parse(sourceCode)
outputDic = {'proc': MetDic, 'quad': listQuadruple, 'cons': ConDic}

print json.dumps(outputDic)
Beispiel #31
0
    u'''depnode : depnode OR depnode'''
    if not isinstance(t[1], DeprelNode_Not) and not isinstance(t[3], DeprelNode_Not):
        t[0] = DeprelNode_Or(t[1], t[3])
    else:
        raise ExpressionError(u"Negated depency restrictions are not allowed inside OR operators, maybe try to include negation outside the OR operator.")
 
def p_dn_not(t):
    u'''depdef : NEG depdef'''
    # Wrap the negated dependency definition in a NOT node.
    negated = DeprelNode_Not(t[2])
    t[0] = negated

def p_sn_not(t):
    u'''tokendef : NEG tokendef'''
    # Wrap the negated token definition in a set-level NOT node.
    negated = SetNode_Not(t[2])
    t[0] = negated

# Build the module-level lexer (unicode-aware) and a default SLR parser.
lex.lex(reflags=re.UNICODE)
yacc.yacc(write_tables=0,debug=1,method='SLR')


if __name__=="__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Expression parser')
    parser.add_argument('expression', nargs='+', help='Training file name, or nothing for training on stdin')
    args = parser.parse_args()
    # A separate LALR parser is used for the command-line expressions.
    e_parser=yacc.yacc(write_tables=0,debug=1,method='LALR')
    for expression in args.expression:

        import logging
        logging.basicConfig(filename='myapp.log', level=logging.INFO)
        log = logging.getLogger()
        # NOTE: Python 2 code (bytes .decode, print statement).
        ebin = e_parser.parse(expression.decode('utf8'), debug=0)
        print ebin.to_unicode()
Beispiel #32
0

def p_code_stmt(p):
    '''code_stmt : qualified ';'
	'''
    # Grammar action: no value produced; optionally trace the rule firing.
    if (Debug1): print "Rule Declared: 234"


def p_error(p):
    # PLY error hook: report the bad token, then tell the parser to
    # resume (errok) so parsing continues past the error.
    print "line :", p.lineno, "-Parsing Error Found at Token:", p.type
    parser.errok()


# Keep a handle on the real stderr (presumably restored elsewhere --
# this snippet is truncated).
old_stderr = sys.stderr

parser = yacc.yacc(start='start_symbol', debug=True)

#Scanning the file name
if (len(sys.argv) == 1):
    file_name = raw_input("Give an Ada file to parse: ")
else:
    file_name = sys.argv[1]

try:
    lexer = lex.lex()

    with open(file_name) as fp:  #opening file
        data = fp.read()
        # NOTE(review): the parser is rebuilt here with identical
        # arguments, shadowing the one above -- confirm this is intended.
        parser = yacc.yacc(start='start_symbol', debug=True)

        lexer.input(data)
Beispiel #33
0
        self.Col=Col

    def __str__(self):
        """Spanish diagnostic: the style array and the graphable
        expression array have different sizes, at (Lin, Col)."""
        location = "En linea, " + str(self.Lin) + "; Columna " + str(self.Col)
        return ("Arreglo de Estilos y Arreglo de Expresiones Graficables"
                " de Tamanos Diferentes. " + location)

class e_nonev_num(Exception):  ## Range expression does not evaluate to numbers
    """Raised when a Range expression fails to evaluate to numbers.

    Records the offending value and its source position (line/column);
    the message itself is rendered in Spanish by __str__.
    """

    def __init__(self, valor, Lin, Col):
        self.valor = valor
        self.Lin = Lin
        self.Col = Col

    def __str__(self):
        position = "En linea, " + str(self.Lin) + "; Columna " + str(self.Col)
        return "La expresion del Range no evalua a Numeros. " + position

## Build the parser ("Constructor del Parser")
import yacc
yacc.yacc()

if __name__ == "__main__":

    # Derive the output stem from the input file name (strip ".txt").
    NM = re.sub(".txt","",sys.argv[1])
    FIL = open(sys.argv[1],"r")

    # Parse the whole input; the grammar actions build a gnuplot script.
    a =yacc.parse(FIL.read(),tracking=True)

    # Emit a gnuplot driver that renders to <input>.pdf.
    NEW = open("entrada.pl","w")
    NEW.write("set term pdf\n")
    NEW.write("set output '"+NM+".pdf' \n")
    NEW.write(a)
    
Beispiel #34
0
 def __init__(self):
     """Attach debug support, then build this module's lexer and parser."""
     Utils.Debug.__init__(self)
     self.lexer = Lexer.Lexer()
     self.parser = yacc.yacc(module=self)
def nsExactMatch(data):
    """Parse the query string *data* into a MongoDB-style filter dict.

    Defines a small lex/yacc grammar inline: terms are keywords, IPs,
    flows, ints or floats; `attr=val`, `attr>val`, `attr<val` become
    field conditions; `+` and `|` combine queries with $and / $or.
    NOTE: Python 2 code (print statements).
    """
    # List of token names
    tokens = (
      'KEYWORD',    
      'IP',
      'FLOW', 
      'INT',
      'FLOAT',    
      'EQ',
      'GT',
      'LT',  
      'LPAREN',
      'RPAREN',
      'AND',
      'OR'
    )

    # Regular expression rules for simple tokens
    # 
    t_AND = r'\+'
    t_OR = r'\|'
    t_EQ   = r'[=]'
    t_GT   = r'>'
    t_LT   = r'<'    
    t_LPAREN  = r'\('
    t_RPAREN  = r'\)'
    t_KEYWORD = r'[a-zA-Z][a-zA-Z0-9\-\:\.]*'

    # simplified tokens
    eight_bit_number = r'(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    port = r'\d+'
    dot = r'\.'
    colon = r'\:'
    # dotted quad built from four 0-255 components; a flow is ip:port:ip:port
    ipv4 = dot.join([eight_bit_number]*4)
    flow = colon.join([ipv4,port]*2)

    # regex rules that require actions and special handling
    @lex.TOKEN(flow)   
    def t_FLOW(t):
        return t

    @lex.TOKEN(ipv4)
    def t_IP(t):
        return t

    def t_FLOAT(t):
        r'(\d+)\.(\d*)'
        t.value = float(t.value)
        t.value = round(t.value,2) 
        return t

    def t_INT(t):
        r'\d+'
        t.value = int(t.value)
        return t     

    # A string containing ignored characters (spaces and tabs)
    t_ignore  = ' \t'

    # Error handling rule
    def t_error(t):
        print "Illegal character '%s'" % t.value[0]
        t.lexer.skip(1)

    # Build the lexer from my environment and return it    

    lex.lex()

    def p_query_unary_paren(p):
        ' query : LPAREN query RPAREN '
        p[0]=p[2]
        #print 'rule>> query : LPAREN query RPAREN>> ' + str(p[0])

    def p_query_binary(p):
        ''' query : query OR query
                | query AND query
                '''
        if p[2]=='+':
            p[0]={'$and':[p[1],p[3]]} 
        else:
            p[0]={'$or':[p[1],p[3]]} 

        #print 'rule>> query : query OR query >> ' + str(p[0])

    def p_query_token(p):
        'query : token'
        p[0]=p[1]
        #print ' rule>> query : token >> ' + str(p[0])

    def p_token_id(p):
        'token : id'
        # bare term: full-text match on the content field
        p[0]={'content':p[1]}


    def p_token_attr_val(p):
        'token : attr_val'
        p[0]=p[1]

    def p_attr_val(p):
        '''
        attr_val : id EQ id 
                | id GT id   
                | id LT id
        '''
        if p[2]=='>':
            p[0]={p[1]:{'$gt':p[3]}}
        elif p[2]=='<':
            p[0]={p[1]:{'$lt':p[3]}}
        else:
            p[0]={p[1]:p[3]}   

    def p_id(p):
        ''' 
        id : INT
              | IP 
              | FLOW
              | FLOAT 
        '''
        #print p[1]
        p[0] = p[1]
        #print p[0]

    def p_id_keyword(p):
        ''' 
        id : KEYWORD
        '''
        # keywords are normalised to lower case
        #print p[1]
        p[0] = p[1].lower()
        #print p[0]       
    # Error rule for syntax errors
    def p_error(p):
        print "Syntax error in input!"
        #print p

    parser = yacc.yacc()

    return parser.parse(data)
Beispiel #36
0
    if len(p) == 2:
        p[0] = Tree('cblock', p[1])
    else:
        p[1].value += p[2]
        p[0] = p[1]


def p_error(p):
    """PLY error hook: log what we know about the bad token, then abort."""
    console.error("inliner error report:")
    if p:
        location = "error at %s:%s" % (p.lexer.sourcefile, p.lexer.lineno)
        console.error(location)
        console.error("Input %s" % p.type)
    sys.exit()


parser = yacc.yacc()


def parse(input, source='__string__', slineno=1):
    # Parse `input`, tagging the shared lexer with the originating source
    # name and starting line for error reporting.
    # NOTE: Python 2 raise syntax; snippet is truncated below.
    #import pdb
    #pdb.set_trace()
    if len(input) == 0:
        # NOTE(review): the trailing comma makes `msg` a 1-tuple --
        # looks accidental; confirm before relying on the message text.
        msg = 'Inliner.WARN:: %s:%s is empty.' % (source, slineno),
        raise AttributeError, msg

    if slineno is not None:
        lexer.lineno = slineno + 1
    else:
        raise AttributeError, 'inliner.parse, slineno is None!'

    lexer.sourcefile = source
Beispiel #37
0
                            s&=all_tokens
                    if not s: return False
            else:
                raise ValueError("I can't handle this!")
        else:
            raise ValueError("I can't handle this!")
    if s:
        return True
    else: return False




if __name__==u"__main__":

    # Demo driver: parse a sample dependency-search expression and run
    # the resulting search tree against the sqlite treebank.
    # NOTE: Python 2 print statements.
    e_parser=yacc.yacc()
    expression=u"_ !>/nsubj/ _ >/dobj/ _ >/cop/ _ "

    node=e_parser.parse(expression)
    print "search tree:",node.to_unicode()

    conn=sqlite3.connect("/mnt/ssd/sdata/sdata2.db")
    out=codecs.getwriter("utf-8")(sys.stdout)

    # Emit every matching tree in CoNLL format.
    for t in query(conn,node):
        t.to_conll(out)
        



Beispiel #38
0
if len(sys.argv) < 3:
    sys.exit('Usage: %s input-file output-file' % sys.argv[0])

fileName = sys.argv[1]
outputName = sys.argv[2] 

#Opens input and output file
file = codecs.open(fileName, "r", "utf-8")
output = codecs.open(outputName, "w", "utf-8")

# concat input from file
inp = ''
for line in file:
    inp += line

yacc.yacc( start='input' )

yada = yacc.parse( inp )

g.signatures = a.retrieveSignatures( yada.children[0] )

# validates signatures are correct before doing anything else
# if any formatting is incorrect prints a message and then
# exits the program
if validateSignatures( g.signatures ):
    g.equations = a.retrieveEquations( yada.children[1] )

# NOTE(review): snippet truncated here -- the `if` below has no body in
# this excerpt.
if( validateEquations( g.equations, g.signatures ) ):

##### Tests #####
#expr1 = Expr('top', [dict({'ArgType' : 'StackInt', 'Value' : Expr("push", [dict({'ArgType' : 'StackInt', 'Value' : "empty"}), dict({'ArgType' : "int" ,'Value' : 2})])})])
Beispiel #39
0
 def __init__(self, lexer = None):
     """Wire up an indentation-aware lexer (created on demand) and the
     grammar rooted at the ``file_input_end`` production."""
     self.lexer = IndentLexer() if lexer is None else lexer
     self.parser = yacc.yacc(start="file_input_end")
Beispiel #40
0
    p[0] = '0b' + s.bin

def p_expression_if_then_else_endif(p):
    "expression : if expression then expression else expression endif"
    # Evaluate the condition bit-vector: a positive signed value selects
    # the THEN branch, anything else the ELSE branch.
    condition = BitArray(p[2])
    # Cleanup: the comparison already yields a boolean; the previous
    # int(...) wrapper around it was redundant.
    if condition.int > 0:
        p[0] = p[4]
    else:
        p[0] = p[6]

def p_error(p):
    """Report a parser syntax error.

    PLY calls this with the offending token, or with None when the
    error is at end of input; the original crashed on ``p.value`` in
    that case.  Single-argument parenthesised print keeps Python 2
    behaviour unchanged while also being valid Python 3.
    """
    if p is None:
        print("Syntax error at end of input")
    else:
        print("Syntax error at '%s'" % p.value)


# Local PLY copy; build parser tables from the module-level grammar
# rules, routing parser diagnostics to the pre-configured logger.
import yacc as yacc
yacc.yacc(errorlog=log)



# test concrete value against a path constraint
def constr_testing(value, constr, var_name):
    global names

    lexer = lex.lex()
    parser = yacc.yacc()
    # print parser.parse('ASSERT(NOT(123 = 123))')

    # print constr

    for index, eachvar in enumerate(var_name):
        str_value = []
Beispiel #41
0
def tdiCompile(text,replacementArgs=_replacementArgs(())):
    """Compile a TDI expression string into an MDSplus data object.

    text            -- TDI source text to compile.
    replacementArgs -- values substituted for $ / $n placeholders; a
                       plain tuple is wrapped in _replacementArgs.

    The entire PLY lexer and parser is defined inside this function;
    the result of yacc.parse(text) is returned at the end.
    """
    import lex
    # Normalize the second argument: allow a bare tuple, otherwise it
    # must already be a _replacementArgs instance.
    if isinstance(replacementArgs,tuple):
        return tdiCompile(text,_replacementArgs(replacementArgs))
    elif not isinstance(replacementArgs,_replacementArgs):
        raise Exception("Second argument to tdiCompile, if suppied, must by a tupple")

### Lexical Tokens
    tokens=['PLUS','MINUS','TIMES','DIVIDE','EQUAL','EQUALS',
            'LPAREN','RPAREN','LBRACE','RBRACE','LBRACKET','RBRACKET','COMMA',
            'BU','B','WU','W','LU','L','QU','Q','FloatNum','T','T2','IDENT','PLACEHOLDER',
            'NAME','ARROW','GREATER','LESS','RAISE','GREATER_EQUAL',
            'LESS_EQUAL','NOT_EQUAL','QUESTION','COLON','LSHIFT','RSHIFT',
            'SEMICOLON','IAND','AND','NOT','PLUSPLUS','MINUSMINUS',
            'SLASHSLASH','IOR','OR','INOT','EQUALSFIRST','TREEPATH','BACKQUOTE',
            ]
### Reserved keywords

    reserved = {'if':'IF','else':'ELSE','public':'IDENTTYPE',
                'private':'IDENTTYPE','fun':'FUN','in':'ARGTYPE',
                'out':'ARGTYPE','inout':'ARGTYPE','optional':'ARGTYPE',
                'as_is':'ARGTYPE','switch':'SWITCH','case':'CASE',
                'for':'FOR','while':'WHILE','break':'BREAK',
                'continue':'CONTINUE','not':'NOT_S','and':'AND_S','or':'OR_S',
                'nor':'NOR_S','mod':'MOD_S','eq':'EQ_S','ne':'NE_S','gt':'GT_S',
                'ge':'GE_S','lt':'LT_S','le':'LE_S','default':'DEFAULT',
                }
    tokens += list(set(reserved.values()))

### ignore comments denoted by /* ..... */  NOTE: Nested comments allowed which required the states trick

    states = (('nestcomment','exclusive'),)

    def t_nestcomment_comment(t):
        r'(.|\n)*?(\*/|/\*)'
        # Inside a nested comment: a new '/*' pushes one more level,
        # a '*/' pops the current level.
        if t.value[-2:]=='/*':
            t.lexer.push_state('nestcomment')
        else:
            t.lexer.pop_state()

    def t_COMMENT(t):
        r'(/\*(.|\n)*?(\*/|/\*))'
        # Outermost comment rule.  Hitting '/*' before '*/' means we are
        # two levels deep (outer comment plus nested one) — hence the
        # two pushes; t_nestcomment_comment pops once per '*/'.
        if t.value[-2:]=='/*':
            t.lexer.push_state('nestcomment')
            t.lexer.push_state('nestcomment')

### integer token including hex,binary,octal and decimal
    integer=r'0[Xx][0-9A-Fa-f]+|0[Bb][01]+|0[0-7]+|[1-9]+[0-9]*|0'

    def fix_backquotes(in_str):
        """Expand backslash escapes (octal and \\m \\n \\t \\r) in a string.

        Escape sequences longer than four characters and escaped
        backslashes are left untouched; octal sequences become the
        corresponding single character.
        """
        import re

        def expand(match):
            esc = match.group(0)
            # Over-long octal runs and escaped backslashes pass through.
            if len(esc) > 4 or esc[1] == '\\':
                return esc
            if esc[1] in 'mntr':
                return eval("'" + esc + "'")
            return chr(int(esc[1:], 8))

        return re.sub(r'\\[0-7]+|\\[\\mntr]', expand, in_str)

### string token with double quotes converted to String() instance
    @lex.TOKEN(r'"(?:[^"\\]|\\.)*"')
    def t_T(t):
        # Strip the surrounding quotes and collapse escape sequences.
        t.value=String(fix_backquotes(t.value).replace('\\"','"').replace("\\'","'").replace('\\\\','\\')[1:-1])
        return t

### string token with single quotes converted to String() instance
    @lex.TOKEN(r"'(?:[^'\\]|\\.)*'")
    def t_T2(t):
        # Same as t_T but for single-quoted strings.
        t.value=String(fix_backquotes(t.value).replace("\\'","'").replace('\\"','"').replace('\\\\','\\')[1:-1])
        return t

### unsigned byte token converted to Uint8() instance
    @lex.TOKEN(r'(?i)(byte_unsigned|unsigned_byte)\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))(bu|ub)')
    def t_BU(t):        
        t.value=Uint8(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### unsigned word converted to Uint16() instance
    @lex.TOKEN(r'(?i)(word_unsigned|unsigned_word)\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))(wu|uw)')
    def t_WU(t):
        t.value=Uint16(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### signed word converted to Int16() instance
    @lex.TOKEN(r'(?i)word\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))w')
    def t_W(t):
        t.value=Int16(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### unsigned quadword converted to Uint64() instance
    @lex.TOKEN(r'(?i)(quadword_unsigned|unsigned_quadword)\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))(uq|qu)')
    def t_QU(t):
        t.value=Uint64(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### unsigned int converted to Uint32() instance
    @lex.TOKEN(r'(?i)(long_unsigned|unsigned_long)\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))(lu|ul|u)')
    def t_LU(t):
        t.value=Uint32(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### signed quadword converted to Int64() instance
    @lex.TOKEN(r'(?i)quadword\((?P<number1>('+integer+r'))\)|(?P<number2>('+integer+r'))q')
    def t_Q(t):
        t.value=Int64(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### Float instance converted to either Float32() or Float64() instance
    @lex.TOKEN(r'(?i)([0-9]+\.(?!\.)[0-9]*|[0-9]*\.[0-9]+|[0-9]+)(?P<exp>([dgef]))[-+]?[0-9]+|[0-9]+\.(?!\.)[0-9]*|[0-9]*\.[0-9]+')
    def t_FloatNum(t):
        exp=t.lexer.lexmatch.group('exp')
        if exp is not None:
            exp=exp.lower()
        val=t.value.lower().replace('d','e').replace('g','e').replace('f','e')
        if exp is None or exp == 'e' or exp == 'f':
            t.value=Float32(val)
        else:
            t.value=Float64(val)
            if 'inf' in repr(t.value.data()):
                t.value=Float32(val)
        return t

### signed byte converted to Int8() instance
    @lex.TOKEN(r'(?i)byte\((?P<number1>('+integer+'))\)|(?P<number2>('+integer+'))b')
    def t_B(t):
        t.value=Int8(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### signed int converted to Int32() instances. NOTE must be end of the scalar tokens to work for some reason.
    @lex.TOKEN(r'(?i)long\((?P<number1>('+integer+'))\)|(?P<number2>('+integer+'))l?')
    def t_L(t):
        t.value=Int32(int(t.lexer.lexmatch.group('number1') or t.lexer.lexmatch.group('number2'),0))
        return t

### Ident or builtin constant converted to either Ident() instance or a Builtin() instance for constants such as $PI
    @lex.TOKEN(r'(?i)(\$([a-z]+[a-z0-9_\$]*)|([0-9]+[a-z_\$]+[a-z0-9_\$]*))|(_[a-z0-9_\$]*)')
    def t_IDENT(t):
        if t.value.lower()=="$roprand":
            import numpy as np
            t.value=np.frombuffer(np.getbuffer(np.int32(2147483647)),dtype=np.float32)[0]
        else:
            try:
                t.value=Builtin(t.value,())
            except Exception:
                t.value=Ident(t.value)
        return t

### Placeholders
    @lex.TOKEN(r'\$[1-9]*[0-9]*')
    def t_PLACEHOLDER(t):
        # Bare '$' consumes the next replacement argument in order;
        # '$n' selects argument n (1-based).
        if len(t.value)==1:
            idx=replacementArgs.idx
        else:
            idx=int(t.value[1:])
        if idx <= len(replacementArgs.args):
            t.value=makeData(replacementArgs.args[idx-1])
        else:
            raise Exception('%TDI-E-TdiMISS_ARG, Missing argument is required for function')
        # Subsequent bare '$' placeholders continue after this index.
        replacementArgs.idx=idx+1
        return t

### Tree path \[treename::]tagname[.|:]node[.|:]node...
    pname=r'[a-z][a-z0-9$_]*'
    @lex.TOKEN(r'(?i)(((\\('+pname+r'::)?|[\.:])?'+pname+r')|(\.-(\.?-)*))([\.:]'+pname+r')*')
    def t_TREEPATH(t):
        # Reserved words also match this pattern; reclassify them first.
        if t.value.lower() in reserved:
            t.type=reserved[t.value.lower()]
        else:
            import re
            original_value=t.value
            # If the match is followed by '(' or '->' it is really a
            # function call or library call, not a tree path: rewind the
            # lexer and re-emit the leading piece as COLON or NAME.
            if re.match(r'[\s]*(\(|->)',t.lexer.lexdata[t.lexer.lexpos:]) is not None:
                skip=t.value.find(':')
                if skip == 0:
                    t.lexer.lexpos=t.lexer.lexpos-len(t.value)+1
                    t.type='COLON'
                    t.value=':'
                else:
                    if skip > -1:
                        t.lexer.lexpos=t.lexer.lexpos-len(t.value)+skip
                        t.value=t.value[0:skip]
                    t.type='NAME'
            else:
                # Resolve against the currently open tree; if lookup
                # fails, build an absolute \tree::... path string.
                try:
                    t.value=Tree().getNode(t.value)
                except:
                    if t.value[0] in '.:':
                        t.value='\\'+Tree().tree+'::TOP'+t.value
                    elif t.value[0] == '\\':
                        if t.value.find('::') == -1:
                            t.value='\\'+Tree().tree+'::'+t.value[1:]
                    else:
                        t.value='\\'+Tree().tree+'::TOP:'+t.value
                    t.value=TreePath(t.value.upper())
                # Keep the source spelling for later USING() rebuilds.
                t.value.original_value=original_value
        return t

### Various operators
    t_PLUS    = r'\+'
    t_MINUS   = r'-'
    t_TIMES   = r'\*'
    t_DIVIDE  = r'/'
    t_EQUALS  = r'=='
    t_EQUAL   = r'='
    t_LPAREN  = r'\('
    t_RPAREN  = r'\)'
    t_LBRACE  = r'{'
    t_RBRACE  = r'}'
    t_LBRACKET = r'\['
    t_RBRACKET = r'\]'
    t_COMMA   = r','
    t_ARROW   = r'->'
    t_GREATER = r'>'
    t_GREATER_EQUAL = r'>='
    t_LESS    = r'<'
    t_LESS_EQUAL = r'<='
    t_NOT_EQUAL = r'!=|<>'
    t_RAISE   = r'\^|\*\*'
    t_QUESTION = r'\?'
    t_LSHIFT = r'<<'
    t_RSHIFT = r'>>'
    t_SEMICOLON = r';'
    t_IAND = r'&'
    t_AND = r'&&'
    t_NOT = r'!'
    t_PLUSPLUS = r'\+\+'
    t_MINUSMINUS = r'--'
    t_SLASHSLASH = r'//'
    t_IOR = r'\|'
    t_OR = r'\|\|'
    t_INOT = r'~'
    t_EQUALSFIRST = r'\+=|-=|\*=|/=|\^=|\*\*=|<==|>==|>>=|<<=|&=|&&=|!==|\|=|\|\|=|//='
    t_BACKQUOTE = r'`'


    def t_COLON(t):
        r'\.\.|:'
        # '..' and ':' are both range separators; normalize to ':'.
        t.value=':'
        return t
        

### Name token which begins with an alpha followed by zero or more of aphanumeric or underscore
### or a reserved word token such as if, while, switch, for ...
    def t_NAME(t):
        r'(?i)\b[a-z]+[a-z0-9_]*\b'
        t.type = reserved.get(t.value.lower(),'NAME')
        return t


# Define a rule so we can track line numbers
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += len(t.value)


# Error handling rule
    def t_ANY_error(t):
        # Report the bad character with ten chars of context on each side.
        # NOTE(review): skip(1) is commented out, so lexing does not
        # resume past the bad character — confirm this is intentional.
        print( "Illegal character '%s'(%d) at line %d around '%s'" % (t.value[0],ord(t.value[0]),t.lexer.lineno,t.lexer.lexdata[t.lexer.lexpos-10:t.lexer.lexpos+10]))
#        t.lexer.skip(1)

# A string containing ignored characters (spaces and tabs)
    t_ANY_ignore  = ' \t\r\0'


# Build the lexer

    lex.lex(debug=0,optimize=optimized,lextab='tdilextab')

    precedence = (
        ('right','EQUAL'),
        ('right','COMMA'),
        ('left','COLON'),
        ('left','QUESTION'),
        ('left','OR','AND','OR_S','AND_S'),
        ('left','GREATER','GREATER_EQUAL','LESS','LESS_EQUAL','EQUALS','NOT_EQUAL','GT_S','GE_S','LT_S','LE_S','EQ_S','NE_S'),
        ('left','SLASHSLASH'),
        ('left','PLUS','MINUS','IOR','IAND'),
        ('left','TIMES','DIVIDE'),
        ('left','RAISE','MOD_S'),
        ('right','RSHIFT','LSHIFT','UNOP'),
        ('left','LBRACKET','LPAREN','IDENTTYPE'),
        )

    def p_compilation(t):
        """compilation : statements\n| operand\n | operand SEMICOLON
        """
        t[0]=t[1]
        if isinstance(t[0],Builtin) and len(t[0].args)==2 and isinstance(t[0].args[0],String) and isinstance(t[0].args[1],String):
            t[0]=String(str(t[0].args[0])+str(t[0].args[1]))

### operands can be arguments to operators
    def p_operand(t):
        """operand : scalar\n| operation\n| parenthisized_operand\n| ident\n| vector\n| TREEPATH"""
        t[0]=t[1]

### Subscripting (i.e. _a[32])
    def p_subscript(t):
        """operation : operand vector"""
        if len(t) == 2:
            t[0]=t[1]
        else:
            args=[t[1],]
            if isinstance(t[2],Builtin):
                for arg in t[2].args:
                    args.append(arg)
            else:
                for arg in t[2]:
                    args.append(arg)
            t[0]=Builtin('subscript',tuple(args))

### parenthisized operands such as (1+2) for specifying things like (1+2)*10
    def p_parenthisized_operand(t):
        'parenthisized_operand : LPAREN operand RPAREN'
        t[0]=t[2]

### Basic scalars supported by MDSplus
    def p_scalar(t):
        'scalar : BU \n| B \n| WU \n| W \n| LU \n| L \n| QU \n| Q \n| FloatNum \n| T \n| T2 \n| missing'
        t[0]=t[1]

### Ken variable (i.e. _gub or public _gub)
    def p_ident(t):
        """ident : IDENT\n| PLACEHOLDER\n| IDENTTYPE IDENT"""
        if len(t) == 2:
            t[0]=t[1]
        else:
            t[0]=Builtin(t[1],(str(t[2]),))

### Missing value specified by asterisk
    def p_missing(t):
        'missing : TIMES'
        t[0]=makeData(None)

### Range constructor (a : b [:c])
    def p_range(t):
        """range : range COLON operand\n| operand COLON operand"""
        if isinstance(t[1],list):
            t[1].append(t[3])
            t[0]=t[1]
        else:
            t[0]=[t[1],t[3]]

    def p_op_range(t):
        """operation : range"""
        t[0]=Range(tuple(t[1]))

### Loop control operations (i.e. break, continue)
    def p_loop_control(t):
        'operation : BREAK\n| CONTINUE'
        t[0]=Builtin(t[1],tuple())

### Unary arithmetic operations such as ~a, -a
    def p_unaryop(t):
        """operation : NOT operand %prec UNOP\n| INOT operand %prec UNOP\n| MINUS operand %prec UNOP\n| PLUS operand %prec UNOP
        | NOT_S operand %prec UNOP"""
        ops = {'!':'NOT','~':'INOT','-':'UNARY_MINUS','not':'NOT','+':'UNARY_PLUS'}
        if t[1]=='-' and isinstance(t[2],Scalar):
            t[0]=makeData(-t[2].data())
        elif t[1]=='+' and isinstance(t[2],Scalar):
            t[0]=t[2]
        else:
            t[0]=Builtin(ops[t[1].lower()],(t[2],))

### Binary arithmetic operations such as a+b a>=b a^b a&&b
    def p_binop(t):
        """operation : operand PLUS operand
        | operand MINUS operand\n| operand TIMES operand\n| operand DIVIDE operand
        | operand RAISE operand\n| operand RSHIFT operand\n| operand LSHIFT operand
        | operand LESS operand\n| operand GREATER operand\n| operand LESS_EQUAL operand
        | operand GREATER_EQUAL operand\n| operand EQUALS operand \n| operand IAND operand
        | operand AND operand \n| operand OR operand \n| operand NOT_EQUAL operand
        | operand IOR operand\n| operand AND_S operand \n| operand OR_S operand\n| operand NOR_S operand
        | operand MOD_S operand
        | MOD_S LPAREN operand COMMA operand RPAREN
        | operand GT_S operand\n| operand GE_S operand\n| operand LT_S operand\n| operand LE_S operand
        | operand EQ_S operand\n| operand NE_S operand
        """
        ops = {'+':'add','-':'subtract','*':'multiply','/':'divide','<':'lt',
               '>':'gt','^':'power','**':'power','<=':'le','>=':'ge','==':'eq',
               '>>':'shift_right','<<':'shift_left','&':'iand','&&':'and','!=':'NE','<>':'NE',
               '|':'ior','||':'or','and':'and','or':'or','nor':'nor','mod':'MOD',
               'gt':'gt','ge':'ge','lt':'lt','le':'le','eq':'eq','ne':'ne'}
        if len(t)==4:
            t[0]=Builtin(ops[t[2].lower()],(t[1],t[3]))
        else:
            t[0]=Builtin(ops[t[1].lower()],(t[3],t[5]))

### Concatenation operator a // b [// c]
### Jump through hoops to emulate weird tdi behavior which concatenates string types at compile time except for the
### caveat that if the number of concatenation arguments is even and all strings concatenate the first n-1 and
### then make a concat function that concats the first n-1 with the nth, other wise concat them all. If any of the
### items being concatenated is not a string then don't concat anything at run time.
    class Concat(list):
        # Accumulates '//' operands; get() folds adjacent string operands
        # at compile time, mimicking the TDI concat quirks described in
        # the comment block above.
        def get(self):
            compile_time_concat=True
            for arg in self:
                if not isinstance(arg,(str,String)):
                    compile_time_concat=False
                    break
            if compile_time_concat:
                c=list()
                c.append(self[0])
                if len(self) % 2 == 0:
                    # Even count: fold the first n-1 operands and keep the
                    # last as a runtime concat argument.
                    for arg in self[1:-1]:
                        c[-1]=str(c[-1])+str(arg)
                    c.append(self[-1])
                else:
                    # Odd count: fold everything into a single String.
                    for arg in self[1:]:
                        c[-1]=String(str(c[-1])+str(arg))
                if len(c)>1:
                    return Builtin('concat',tuple(c))
                else:
                    return c[0]
            else:
                # Non-string operand present: defer entirely to runtime.
                return Builtin('concat',tuple(self))

    def p_concat(t):
        'concat : operand SLASHSLASH operand\n| concat SLASHSLASH operand\n operation : concat'
        if len(t)==4:
            # Chain operands into one Concat accumulator.
            if isinstance(t[1],Concat):
                t[1].append(t[3])
                t[0]=t[1]
            else:
                t[0]=Concat([t[1],t[3]])
        else:
            # Reduction of 'operation : concat': materialize the result.
            t[0]=t[1].get()
            if isinstance(t[0],String):
                # NOTE(review): a fully folded string is retyped so it
                # behaves as a scalar downstream — confirm intent.
                t.type='scalar'

### Conditional operation (i.e. a ? b : c)
    def p_conditional(t):
        'operation : operand QUESTION operand COLON operand'
        t[0]=Builtin('conditional',(t[3],t[5],t[1]))

### Ident increment/decrement (i.e. _i++, _i--, ++_i, --_i)
    def p_inc_dec(t):
        """operation : ident PLUSPLUS\n| ident MINUSMINUS\n| PLUSPLUS ident\n| MINUSMINUS ident"""
        op={'++':'_inc','--':'_dec'}
        if isinstance(t[1],str):
            t[0]=Builtin('pre'+op[t[1]],(t[2],))
        else:
            t[0]=Builtin('post'+op[t[2]],(t[1],))

### Ken variable assignment (i.e. _i=1)
    def p_assignment(t):
        'operation : operand EQUAL operand %prec EQUAL'
        t[0]=Builtin('EQUALS',(t[1],t[3]))

### Argument list for function calls (i.e. ([a[,b[,c]]])  )
    def p_arglist(t):
        """arglist : LPAREN args RPAREN\n args :\n| args operand\n| args COMMA\n| args ARGTYPE LPAREN operand RPAREN"""
        if len(t)==4:
            t[0]=t[2]
        elif len(t)==1:
            t[0]=list()
        else:
            if len(t)==6:
                t[2]=Builtin(t[2],(t[4],))
            if isinstance(t[2],str):
                if len(t[1])==0:
                    t[1].append(None)
                t[1].append(None)
            else:
                if len(t[1]) > 0 and (t[1][-1] is None or isinstance(t[1][-1],EmptyData)):
                    t[1][-1]=t[2]
                else:
                    t[1].append(t[2])
            t[0]=t[1]

### Function call (i.e. gub(1,2,,3)) also handles build_xxx() and make_xxx() operations
    def p_function(t):
        """operation : NAME arglist\n| EQ_S arglist\n| NE_S arglist\n| LE_S arglist
        | LT_S arglist\n| GT_S arglist\n| GE_S arglist"""

        def doBuild(name,args):
            def build_with_units(args):
                args[0].units=args[1]
                return args[0]

            def build_with_error(args):
                args[0].error=args[1]
                return args[0]

            def build_param(args):
                try:
                    args[0].help=args[1]
                    args[0].validation=args[2]
                except:
                    pass
                return args[0]

            def build_slope(args):
                new_args=list()
                if len(args)>1:
                    new_args.append(args[1])
                else:
                    new_args.append(None)
                if len(args)>2:
                    new_args.append(args[2])
                else:
                    new_args.append(None)
                new_args.append(args[0])
                return Range(tuple(new_args))

            def buildPath(args):
                if isinstance(args[0],(str,String)):
                    name=str(args[0])
                    if len(name) > 1 and name[0:2]=='\\\\':
                        name=name[1:]
                    ans = TreePath(name)
                else:
                    ans = Builtin('build_path',args)
                return ans
            
            def buildCall(args):
                ans=Call(args[1:])
                ans.retType=args[0]
                return ans

### retain original node specifiers when building a using function
            def buildUsing(args_in):
                def restoreTreePaths(arg):
                    if isinstance(arg,Compound):
                        args=list()
                        for a in arg.args:
                            args.append(restoreTreePaths(a))
                        arg.args=tuple(args)
                        ans = arg
                    elif isinstance(arg,(TreePath,TreeNode)) and hasattr(arg,'original_value'):
                        ans = TreePath(arg.original_value)
                    else:
                        ans = arg
                    return ans

                args=list()
                for arg in args_in:
                    args.append(restoreTreePaths(arg))
                ans = Builtin('using',tuple(args))
                return ans

            known_builds={'BUILD_ACTION':Action,
                          #BUILD_CONDITION':Condition,
                          'BUILD_CONGLOM':Conglom,
                          'BUILD_DEPENDENCY':Dependency,
                          'BUILD_DIM':Dimension,
                          'BUILD_DISPATCH':Dispatch,
                          'BUILD_EVENT':Event,
                          'BUILD_FUNCTION':Builtin,
                          'BUILD_METHOD':Method,
                          'BUILD_PARAM':build_param,
                          'BUILD_PROCEDURE':Procedure,
                          'BUILD_PROGRAM':Program,
                          'BUILD_RANGE':Range,
                          'BUILD_ROUTINE':Routine,
                          'BUILD_SIGNAL':Signal,
                          'BUILD_SLOPE':build_slope,
                          'BUILD_WINDOW':Window,
                          'BUILD_WITH_UNITS':build_with_units,
                          'BUILD_CALL':buildCall,
                          'BUILD_WITH_ERROR':build_with_error,
                          'BUILD_OPAQUE':Opaque,
                          'BUILD_PATH':buildPath,
                          'USING':buildUsing,}
            return known_builds[name.upper()](args)

        def doMake(name,args):
            for arg in args:
                if not isinstance(arg,(Array,Scalar,EmptyData)) and arg is not None:
                    raise Exception('use make opcode')
            name=name.upper().replace('MAKE_','BUILD_')
            if 'BUILD_' in name:
                return doBuild(name,tuple(args))
            else:
                raise Exception("not a make_ call")

        try:
            t[0]=doBuild(t[1],tuple(t[2]))
        except Exception:
            try:
                t[0]=doMake(t[1],tuple(t[2]))
            except Exception:
                try:
                    numbers=['byte','byte_unsigned','unsigned_byte','word','word_unsigned','unsigned_word',
                             'long','long_unsigned','unsigned_long','quadword','quadword_unsigned','unsigned_quadword',
                             'float','double','f_float','g_float','d_float','fs_float','ft_float']
                    if t[1].lower() in numbers and (isinstance(t[2][0],Scalar) or isinstance(t[2][0],Array)):
                        t[0]=Data.evaluate(Builtin(t[1],tuple(t[2])))
                    else:
                        t[0]=Builtin(t[1],tuple(t[2]))
                except Exception:
                    t[0]=Builtin('ext_function',tuple([None,t[1]]+t[2]))

### call library (i.e. library->gub(a,b,c))
    def p_rettype(t):
        'rettype : COLON NAME'
        rettypes={'bu':2,'wu':3,'lu':4,'qu':5,'b':6,'w':7,'l':8,'q':9,'f':10,'d':11,'fc':12,'dc':13,'t':14,
                  'dsc':24,'p':51,'f':52,'fs':52,'ft':53,'fsc':54,'ftc':55}
        if t[2].lower() in rettypes:
            t[0]=rettypes[t[2].lower()]

    def p_call(t):
        """operation : NAME ARROW NAME arglist\n| NAME ARROW NAME rettype arglist"""
        if len(t)==5:
            t[0]=Call(tuple([t[1],t[3]]+t[4]))
        else:
            t[0]=Call(tuple([t[1],t[3]]+t[5]),opcode=t[4])

### Loop and fun statements found inside braces and sometimes in parens
    def p_optional_semicolon(t):
        """optional_semicolon : SEMICOLON\n| empty"""
        pass

    class CommaList(list):
        def get(self):
            return Builtin('comma',tuple(self))

    def p_statement(t):
        """statement : operand SEMICOLON\n| comma_list SEMICOLON\n| comma_list\n| operand\n| SEMICOLON
        """
        if isinstance(t[1],str):
            pass
        elif isinstance(t[1],CommaList):
            t[0]=t[1].get()
        else:
            t[0]=t[1]

    def p_statements(t):
        """statements : statement\n| statements statement\n| statements braced_statements"""
        if len(t)==2:
            t[0]=Builtin('statement',(t[1],))
        else:
            if t[2] is None:
                t[0]=t[1]
            elif len(t[1].args) < 250:
                t[1].args=tuple(list(t[1].args)+[t[2]])
                t[0]=t[1]
            else:
                t[0]=Builtin('statement',(t[1],t[2]))

    def p_braced_statements(t):
        """braced_statements : LBRACE statements RBRACE optional_semicolon\n | LBRACE RBRACE optional_semicolon"""
        if len(t)==5:
            if len(t[2].args)==1:
                t[0]=t[2].args[0]
            else:
                t[0]=t[2]
        else:
            pass

### paren statement list as in if_error(_a,(_a=1;_b++),42)
    def p_statement_list(t):
        'operation : LPAREN statements RPAREN'
        if len(t[2].args)==1:
            t[0]=t[2].args[0]
        else:
            t[0]=t[2]

### comma operand list as in _a=1,_b=2,3
    def p_comma_list(t):
        """comma_list : COMMA\n| operand COMMA\n| comma_list COMMA\n| comma_list operand"""
        if isinstance(t[1],CommaList):
            if isinstance(t[2],str):
                if t[1].lastNone:
                    t[1].append(None)
            else:
                t[1].append(t[2])
                t[1].lastNone=False
            t[0]=t[1]
        else:
            t[0]=CommaList()
            if len(t)==2:
                t[0].append(None)
                t[0].lastNone=True
            else:
                t[0].append(t[1])
                t[0].lastNone=False

### comma operation as in (_a=1,_b=2,3)
    def p_comma_list_operation(t):
        'operation : LPAREN comma_list RPAREN'
        t[0]=t[2].get()

    def p_empty(t):
        'empty :'
        pass

### For statement (i.e. for (_x=1;_x<10;_x++){statements...} or for (...) statement
    def p_optional_comma_list(t):
        """optional_operand : comma_list\n| operand\n| empty"""
        if isinstance(t[1],CommaList):
            t[0]=t[1].get()
        else:
            t[0]=t[1]

    def p_for(t):
        """operation : FOR LPAREN optional_operand SEMICOLON operand SEMICOLON optional_operand RPAREN braced_statements
        | FOR LPAREN optional_operand SEMICOLON operand SEMICOLON optional_operand RPAREN statement"""
        t[0]=Builtin('for',(t[3],t[5],t[7],t[9]))

### If statement (i.e. if (_x<10) {_x=42;} else {_x=43;})
    def p_if_begin(t):
        """if_begin : IF LPAREN operand RPAREN"""
        t[0]=t[3]

    def p_ifelse_body(t):
        """ifelse_body : braced_statements\n| statement"""
        t[0]=t[1]

    def p_if(t):
        """operation : if_begin ifelse_body\n| if_begin ifelse_body ELSE ifelse_body"""
        args=[t[1],t[2]]
        if len(t) > 3:
            args.append(t[4])
        t[0]=Builtin('if',tuple(args))

### While statement (i.e. while(expression){statements;} )
    def p_while(t):
        """operation : WHILE LPAREN operand RPAREN braced_statements
        | WHILE LPAREN operand RPAREN statement"""
        t[0]=Builtin('while',(t[3],t[5]))

### FUN definition (i.e. public fun gub(args){statements} )
    def p_fun_arg(t):
        """fun_arg : ARGTYPE IDENT\n| ARGTYPE ARGTYPE IDENT\n| IDENT\n| ARGTYPE LPAREN IDENT RPAREN\n| ARGTYPE ARGTYPE LPAREN IDENT RPAREN"""
        if len(t) == 2:
            t[0]=t[1]
        elif len(t) == 3:
            t[0]=Builtin(t[1],(str(t[2]),))
        elif len(t) == 4:
            t[0]=Builtin(t[1],(Builtin(t[2],(str(t[3]),)),))
        elif len(t) == 5:
            t[0]=Builtin(t[1],(t[3],))
        else:
            t[0]=Builtin(t[1],(Builtin(t[2],(t[4],)),))

    def p_fun_args(t):
        """fun_args : LPAREN\n| fun_args fun_arg\n| fun_args COMMA\n| fun_args RPAREN"""
        if len(t)==2:
            t[0]=list()
        elif isinstance(t[2],str):
            t[0]=t[1]
        else:
            t[1].append(t[2])
            t[0]=t[1]
                              
    def p_fun(t):
        """operation : IDENTTYPE FUN NAME fun_args braced_statements
        | FUN IDENTTYPE NAME fun_args braced_statements
        | FUN NAME fun_args braced_statements"""
        args=list()
        if len(t) == 6:
            if t[1].lower() == 'fun':
                itype=t[2]
            else:
                itype=t[1]
            args.append(Builtin(itype,(t[3],)))
            args.append(t[5])
            for arg in t[4]:
                args.append(arg)
        else:
            args.append(t[2])
            args.append(t[4])
            for arg in t[3]:
                args.append(arg)
        t[0]=Builtin('fun',tuple(args))

### Vector/Array declarations (i.e. [_a,_b,_c] or [1,2,3,])
    def p_vector(t):
        """vector_part : LBRACKET operand
        | LBRACKET
        | vector_part COMMA operand
        vector : vector_part RBRACKET"""
        if isinstance(t[1],str):
            if len(t)==2:
                t[0]=Builtin('vector',tuple())
                t[0].isarray=True
            else:
                t[0]=Builtin('vector',(t[2],))
                t[0].isarray = isinstance(t[2],Scalar) or isinstance(t[2],Array)
        elif t[2] == ',':
            args=list(t[1].args)
            if len(args) > 250:
                args=[Builtin('vector',tuple(args)),t[3]]
            else:
                args.append(t[3])
            t[1].args=tuple(args)
            t[0]=t[1]
            t[0].isarray = t[1].isarray and (isinstance(t[3],Scalar) or isinstance(t[3],Array))
        else:
            if t[1].isarray:
                t[0]=Data.evaluate(t[1])
            else:
                t[0]=Builtin('vector',t[1].args)

### Switch statement (i.e. switch(_a) {case(42) {statements} case(43) {statements}} )
    def p_case(t):
        """case : CASE LPAREN operand RPAREN braced_statements\n| CASE LPAREN operand RPAREN statement
        | CASE LPAREN operand RPAREN\n| CASE DEFAULT braced_statements
        | CASE DEFAULT statement\n| statement"""
        if len(t)==4:
            t[0]=Builtin('default',(t[3],))
        elif len(t)==5:
            t[0]=Builtin('case',(None,None))
            t[0].args=(t[3],)
            t[0].doAppendCase=True
        elif len(t)==6:
            t[0]=Builtin('case',(t[3],t[5]))
        else:
            t[0]=t[1]

    def p_cases(t):
        """cases : case\n| cases case"""
        # Each new case is attached to the most recent CASE builtin that
        # has no statements yet; once a builtin reaches 250 args a
        # 'statement' node is interposed to stay under the arg limit.

        def findCaseWithNoStatements(case,parent=None,argidx=0):
            # Depth-first search for a CASE builtin holding only its guard.
            ans=None
            if isinstance(case,Builtin):
                if case.name=='CASE' and len(case.args)==1:
                    ans = {'case':case,'parent':parent,'argidx':argidx}
                else:
                    for idx in range(len(case.args)):
                        ans = findCaseWithNoStatements(case.args[idx],case,idx)
                        if ans is not None:
                            break
            return ans

        def appendCase(cases,case):
            c=findCaseWithNoStatements(cases)
            if c is not None:
                appendTo=c
            else:
                appendTo={'case':cases,'parent':None,'argidx':0}
            if len(appendTo['case'].args) < 250:
                appendTo['case'].args=tuple(list(appendTo['case'].args)+[case,])
                return cases
            else:
                statement = Builtin('statement',(appendTo['case'],case))
                if appendTo['parent'] is None:
                    return statement
                else:
                    args=list(appendTo['parent'].args)
                    # BUG FIX: the dict key is 'argidx'; the original used
                    # 'idx', which raised KeyError on the overflow path.
                    args[appendTo['argidx']]=statement
                    appendTo['parent'].args=tuple(args)
                    return cases

        if len(t)==3:
            t[1]=appendCase(t[1],t[2])
        t[0]=t[1]

    def p_switch(t):
        """operation : SWITCH LPAREN operand RPAREN LBRACE cases RBRACE"""
        t[0]=Builtin('switch',(t[3],t[6]))

### "Equals First" statement (i.e. _gub+=42)
    def p_operand_equal_first(t):
        """operation : ident EQUALSFIRST operand"""
        ops = {'+':'add','-':'subtract','*':'multiply','/':'divide','<':'lt',
                  '>':'gt','^':'power','**':'power','<=':'le','>=':'ge','==':'eq',
                  '>>':'shift_right','<<':'shift_left','&':'iand','&&':'and','!=':'NE',
                  '|':'ior','||':'or','//':'concat'}
        items=ops.items()
        ef_dict=dict()
        for itm in items:
            ef_dict.setdefault(itm[0]+'=',itm[1])
        t[0]=Builtin('equals_first',(Builtin(ef_dict[t[2]],(t[1],t[3])),))

### BACKQUOTED expression (i.e. `getnci(gub,"RECORD")
    def p_operand_backquote(t):
        """operand : BACKQUOTE operand"""
        t[0]=Data.evaluate(t[2])

### Handle syntax errors
    def p_error(t):
        if t is not None:
            print("Syntax error at '%s' in line %d: %s" % (t.value,t.lexer.lineno,t.lexer.lexdata[t.lexer.lexpos-10:t.lexer.lexpos+10]))
        else:
            print("Syntax error")

    import yacc
    yacc.yacc(write_tables=optimized,debug=0,optimize=optimized,tabmodule='tdiparsetab')
    return yacc.parse(text)
Beispiel #42
0
    if p[2] == ']': p[0] = []
    else: p[0] = p[2]


def t_error(t):
    # Lexer error hook (Python 2 print syntax): report the unexpected
    # character, then skip past it so scanning can continue.
    print "**ERROR: unexpected character '%s'" % t.value[0]
    t.lexer.skip(1)


def p_error(t):
    # Parser error hook: report the bad token (t is None at end of input)
    # and abort the whole process.  NOTE(review): exit() is the interactive
    # `site` helper; sys.exit() would be the conventional choice here.
    print "**ERROR: malformed expression", t
    exit(1)


# Build the lexer and parser once at import time (no table caching, no
# debug output).
lexer = lex.lex(optimize=0, debug=0)
parser = yacc.yacc(optimize=0, debug=0)


def parse_tree(s):
    """
    Parse a compound expression of the form
    a(b(c),[d]) and return a python tuple
    ('a', ('b', 'c'), ['d']).
    """
    # Delegates to the module-level parser/lexer built at import time.
    return parser.parse(lexer=lexer, input=s)


def parse_tree_file(filename):
    """
    Parser for compound expressions of the form
    a(b(c),[d]) and return a python tuple 
Beispiel #43
0
    t[0] = IfStatement(t[3], t[5], NullNode())

def p_selection_statement_02(t):
    '''selection_statement : IF LPAREN expression RPAREN statement ELSE statement'''
    # if/else: condition, then-branch, else-branch.
    t[0] = IfStatement(t[3], t[5], t[7])

def p_statement_list_02(t):
    '''statement_list : statement'''
    # Base case: start a new list from the single statement.
    t[0] = StatementList(t[1])

def p_statement_list_03(t):
    '''statement_list : statement_list statement'''
    # Recursive case: append the new statement to the existing list in place.
    t[1].add(t[2])
    t[0] = t[1]

def p_empty(t):
    'empty :'
    # Empty production: matches nothing and yields None.
    pass

def p_error(t):
    # Report a syntax error and abort parsing via ParseError.
    # BUG FIX: t is None when the error occurs at end of input; the
    # original unconditionally read t.lineno and crashed with an
    # AttributeError in that case.  Guard the line-number hint.
    print "You've got a syntax error somewhere in your code."
    if t is not None:
        print "It could be around line %d." % t.lineno
    print "Good luck finding it."
    raise ParseError()

# Build the parser tables (debug=1 writes parser.out for grammar debugging).
yacc.yacc(debug=1)

#  ---------------------------------------------------------------
#  End of cparse.py
#  ---------------------------------------------------------------
Beispiel #44
0
    
def p_val_3(t):
    'val : WORD'
    # A bare word is passed through as the value.
    t[0] = t[1]


# XXX also don't yet handle proximity operator

def p_error(t):
    # Abort the parse; t is the offending token, or None at end of input.
    raise ParseError ('Parse p_error ' + str (t))

# Resolve LOGOP ambiguities as left-associative.
precedence = (
    ('left', 'LOGOP'),
    )

# Build the parser at import time; tables are cached in PyZ3950_parsetab.
yacc.yacc (debug=0, tabmodule = 'PyZ3950_parsetab')
#yacc.yacc (debug=0, tabpackage = 'PyZ3950', tabmodule='PyZ3950_parsetab')


def attrset_to_oid (attrset):
    # Resolve an attribute-set name or dotted OID string to an asn1.OidVal.
    l = attrset.lower ()
    # Well-known attribute-set names are looked up directly.
    # (Replaced deprecated dict.has_key with the `in` operator.)
    if l in _attrdict:
        return _attrdict [l]
    split_l = l.split ('.')
    # A leading '.' means the OID is relative to the Z39.50 attributes arc.
    if split_l[0] == '':
        split_l = oids.Z3950_ATTRS + split_l[1:]
    try:
        # Replaced deprecated string.atoi with the int() builtin.
        intlist = [int (s) for s in split_l]
    except ValueError:
        raise ParseError ('Bad OID: ' + l)
    return asn1.OidVal (intlist)
Beispiel #45
0
        p[0] = p[1]
        p[0].args.insert(1, p[3])
    else:
        p[0] = node.expr(op=p[2], args=node.expr_list([p[1], p[3]]))


def p_error(p):
    #import pdb; pdb.set_trace()
    #print "p_error",p
    # Tolerate the error (returning lets PLY attempt recovery) when the
    # current file -- or "." meaning every file -- is listed in
    # options.syntax_errors; otherwise raise for the caller to report.
    if ("." in options.syntax_errors
            or os.path.basename(options.filename) in options.syntax_errors):
        return p
    raise syntax_error(p)


# Build the module-level parser with "top" as the grammar start symbol.
parser = yacc.yacc(start="top")


def parse(buf, filename=""):
    options.filename = filename
    try:
        new_lexer = lexer.new()
        p = parser.parse(buf, tracking=1, debug=0, lexer=new_lexer)
        return p
    except lexer.IllegalCharacterError as (lineno, column, c):
        #import pdb; pdb.set_trace()
        print 'Error:%s:%s.%s:illegal character:%s' % (filename, lineno,
                                                       column, c)
        return []
    except NotImplementedError as e:
        print 'Error:%s:not implemented:%s' % (filename, e)
Beispiel #46
0
def p_Statement(p): # Really statements of many shapes, e.g. if_stmt
    """Statement : ID ';'"""
    # Wrap the identifier in a Statement AST node.
    p[0]=Statement(p[1])

def p_Type(p):
    '''Type : INTEGER
    | STRING
    | ID
    '''
    # Wrap the matched type token in a Type AST node.
    p[0] = Type(p[1])

def p_empty(p):
    'empty :'
    # Empty production: matches nothing and yields None.
    pass

def p_error(p):
    # Report a syntax error ("Error de sintaxis"); p is the bad token, or
    # None when the error is at end of input.
    print "Error de sintaxis ", p

# Build the parser using SLR table construction.
yacc.yacc(method='SLR')

    
# Read the test program from disk.
s = open("prueba.txt").read()
    
# Build the AST: the root is p[0] of the first grammar rule.
raiz = yacc.parse(s)
raiz.imprimir()




Beispiel #47
0

def p_pair(p):
    '''PAIR : string colon VALUE'''
    # A JSON object member: tag the key string together with its value.
    p[0] = ("pair", p[1], p[3])


    
def p_array(p):
    '''ARRAY : lbracket ELEMENTS rbracket
             | lbracket rbracket
    '''
    # The non-empty alternative has four symbols (len(p) == 4); the empty
    # array produces a bare ("elements",) tag.
    if len(p) == 4:
        p[0] = ("elements", p[2])
    else:
        p[0] = ("elements",)
    


def p_elements(p):
    '''ELEMENTS : VALUE comma ELEMENTS | VALUE'''
    # Single VALUE: propagate it unchanged (a bare value, not a list).
    if len(p) == 2:
        p[0] = p[1]
        return
    # VALUE comma ELEMENTS: build a flat element list.
    # BUG FIX: the recursive ELEMENTS result is a bare value for one
    # trailing element but a list for two or more; the original wrapped it
    # in another list unconditionally, so 'a,b,c' parsed as
    # ['a', ['b', 'c']].  Normalize before concatenating; results for one
    # and two elements are unchanged.
    rest = p[3] if isinstance(p[3], list) else [p[3]]
    p[0] = [p[1]] + rest




if __name__ == "__main__":
    # Build the parser and run it over stdin; print the resulting tree or
    # a failure notice.  NOTE(review): 'parser' is not a standard PLY
    # yacc.yacc keyword argument -- presumably a local yacc variant;
    # confirm against the bundled yacc module.
    parser = yacc.yacc(parser='RD')
    tree = parser.parse(sys.stdin.read(), lexer)
    if tree is not None:
        print tree
    else:
        print "Parsing failed!"
Beispiel #48
0
        # tell which is which.  So understanding of colon expressions
        # is put off until after "resolve".
        p[0] = p[1]
        p[0].args.insert(1,p[3])
    else:
        p[0] = node.expr(op=p[2],args=node.expr_list([p[1],p[3]]))

def p_error(p):
    #import pdb; pdb.set_trace()
    #print "p_error",p
    # Tolerate the error (returning lets PLY attempt recovery) when the
    # current file -- or "." meaning every file -- is listed in
    # options.syntax_errors; otherwise raise for the caller to report.
    if ("." in options.syntax_errors or
        os.path.basename(options.filename) in options.syntax_errors):
        return p
    raise syntax_error(p)

# Build the module-level parser with "top" as the grammar start symbol.
parser = yacc.yacc(start="top")

def parse(buf,filename=""):
    options.filename = filename
    try:
        new_lexer = lexer.new()
        p = parser.parse(buf,tracking=1,debug=0,lexer=new_lexer)
        return p
    except lexer.IllegalCharacterError as (lineno,column,c):
        #import pdb; pdb.set_trace()
        print 'Error:%s:%s.%s:illegal character:%s' % (filename,lineno,column,c)
        return []
    except NotImplementedError as e:
        print 'Error:%s:not implemented:%s' % (filename,e)
        return []
    except syntax_error as e:
Beispiel #49
0
            u"Negated depency restrictions are not allowed inside OR operators, maybe try to include negation outside the OR operator."
        )


def p_dn_not(t):
    u'''depdef : NEG depdef'''
    # Negated dependency restriction node.
    t[0] = DeprelNode_Not(t[2])


def p_sn_not(t):
    u'''tokendef : NEG tokendef'''
    # Negated token-set restriction node.
    t[0] = SetNode_Not(t[2])


# Build the lexer (with Unicode-aware regexes) and the SLR parser at
# import time; no table files are written.
lex.lex(reflags=re.UNICODE)
yacc.yacc(write_tables=0, debug=1, method='SLR')

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Expression parser')
    parser.add_argument(
        'expression',
        nargs='+',
        help='Training file name, or nothing for training on stdin')
    args = parser.parse_args()
    e_parser = yacc.yacc(write_tables=0, debug=1, method='LALR')
    for expression in args.expression:

        import logging
        logging.basicConfig(filename='myapp.log', level=logging.INFO)
        log = logging.getLogger()
Beispiel #50
0
import lexer_rules
import parser_rules
import lex
import yacc
import sys

filein = sys.argv[1]
fileout = sys.argv[2]

# Read the source program: prompt on stdin when no input file is given,
# otherwise slurp the whole file (replaces the line-by-line concat loop).
if filein == "": text = raw_input()
else : 
	filein = open(filein, 'r')
	text = filein.read()
	filein.close()

# Resolve the output stream: stdout when no output file is named.
# BUG FIX: the original called file.open(fileout, 'w'); the `file` type
# has no `open` attribute, so that line always raised AttributeError.
# The builtin open() is what was intended.
if fileout == "": fileout = sys.stdout
else: fileout = open(fileout, 'w')
	
lexer = lex.lex(module=lexer_rules)
parser = yacc.yacc(module=parser_rules)

ast = parser.parse(text, lexer)

fileout.write(ast)
fileout.write('\n')
Beispiel #51
0
def md2mc(src="None", fname="None"):
    """md2mc converts a markdown source to a machine (mc).
    
       One can feed the markdown in three ways, shown via 
       pseudo-examples:
       
       1) md2mc()
       
          It means you will provide a file-name
          (you will be prompted for one). Then the markdown is read from
          that file. 
          
       2) md2mc(src="<any string S other than 'File'>")
       
          S is now taken as the markdown string and parsed. This is 
          bound to be a multi-line file. 
          
          There is a Jupyter bug that if the parser (or any process) 
          consuming a multi-line input throws an exception, you will get 
          a strange error message: 
          ERROR:root:An unexpected error occurred while tokenizing input
          Ignore it please, and instead spend your time fixing the 
          markdown input. See for details:
          https://github.com/ipython/ipython/issues/6864
          
          
       3) md2mc(src="File", fname="<your file name path>")
       
          Obviously, you should not be feeding a markdown with contents 
          "File". It is not legit markdown syntax. So if src="File", 
          then fname is taken to be the path-name to a file that is 
          opened and read.
        
       In all cases, the returned result is a machine structure (dict).
    """
    # Obtain the markdown text: prompt for a file, read a named file, or
    # treat src itself as the markdown string.
    if (src == "None"):
        mdstr = open(input('File name ='), 'r').read()
    elif (src == "File"):
        mdstr = open(fname).read()
    else:
        mdstr = src
    myparser = yacc()
    mdlexer = lex()  # Build lexer custom-made for markdown
    rslt = myparser.parse(mdstr, lexer=mdlexer)  # feed into parse fn
    #--
    # Now, based on machine type, return correct machine object.
    #--
    (machine_type, (From_s, To_s, G_in, G_out, Q0, F, Sigma, Dirn,
                    Delta)) = rslt
    #--
    #-- for now, make struct right here; later call right maker
    #--
    # Deterministic machine kinds must have exactly one initial state.
    if machine_type != 'NFA':
        assert (len(Q0) == 1)
        q0 = list(Q0)[0]
    if machine_type == 'DFA':
        return {
            "Q": From_s | To_s,
            "Sigma": Sigma,
            "Delta": Delta,
            "q0": q0,
            "F": F
        }

    elif machine_type == 'NFA':
        return {
            "Q": From_s | To_s,
            "Sigma": Sigma - {'', ""},
            "Delta": Delta,
            "Q0": Q0,
            "F": F
        }

    elif machine_type == 'PDA':
        # Union of all stack-output symbol sets seen in the transitions.
        G_out_set = reduce(lambda x, y: x | y, map(set, G_out), set({}))
        return {
            "Q": From_s | To_s,
            "Sigma": Sigma - {'', ""},
            "Gamma": (G_in | G_out_set | {'#'} | Sigma) - {'', ""},
            "Delta": Delta,
            "q0": q0,
            "z0": '#',  # Hash-mark is the new "z0" for a PDA!
            "F": set(F)
        }
    else:
        assert (machine_type == 'TM')
        return {
            "Q": From_s | To_s,
            "Sigma": Sigma - {'', "", '@', '.'},
            "Gamma": (G_in | G_out | {'.'} | Sigma) - {'', "", '@'},
            "Delta": Delta,
            "q0": q0,
            "B": '.',
            "F": F
        }
    # NOTE: the original ended with an unreachable `return rslt` -- every
    # branch of the if/elif/else above already returns -- so it was removed.
Beispiel #52
0
   Parses the given input string (which must contain IDL code).  If the parsing
   is successful, returns the root of the resulting abstract syntax tree;
   otherwise, returns None.  If debug is true, any syntax errors will
   produce parser debugging output.
   """

    # Reset global state stuff
    error.clear_error_list()
    i2py_map.clear_extra_code()
    lexer.lineno = 1  # This needs to be reset manually (PLY bug?)

    # Ensure that the input contains a final newline (the parser will choke
    # otherwise)
    if input[-1] != '\n':
        input += '\n'

    # Parse input and return the result
    return parser.parse(input, lexer, debug)


#
# Create the parser
#

# Build the grammar productions, then create the module-level LALR parser
# (tables cached in 'ytab', grammar debug report in y.output, both written
# next to this file).
build_productions()
parser = yacc.yacc(method='LALR',
                   debug=True,
                   tabmodule='ytab',
                   debugfile='y.output',
                   outputdir=os.path.dirname(__file__))
Beispiel #53
0
    elif str_regex.match(t[1]): t[0] = WordNode( t[1] )
    elif sim_regex.match(t[1]): t[0] = SimNode( t[1][1:] )
    elif sub_regex.match(t[1]): t[0] = SubstringNode( t[1][1:-1] )
    elif rt_regex.match(t[1]):  t[0] = TruncNode( t[1][:-1] )
    elif lt_regex.match(t[1]):  t[0] = LTruncNode( t[1][1:] )
    else:             
        if not (t[1].lower().strip() in ('and', 'or', 'not', 'near')):
            t[0] = GlobNode( t[1] )

 
def p_error(t):
    # Abort the parse on any syntax error.  The exception-call form
    # replaces the Python-2-only comma raise syntax (identical message).
    # NOTE(review): t is None for an error at end of input, which would
    # still fail on t.value here -- same as the original behavior.
    raise QueryParserError("Syntax error at '%s'" % t.value)

import yacc
# Build the parser tables at import time; failures (e.g. an unwritable
# directory for parsetab) are logged rather than breaking the import.
try:
    yacc.yacc(debug=0)
except:
    from zLOG import LOG, ERROR
    LOG("TextIndexNG", ERROR, "parsetab creation failed") 


class Parser(BaseParser):

    id = 'NewQueryParser'
    parser_description = 'A TextIndex compatible parser (native Python version)'

    def parse(self, query, operator):
        
        try:
            return yacc.parse( query )
        except QueryParserError:
 def build(self, **kwdargs):
     # Construct the PLY parser for the 'launchers' start symbol, taking
     # grammar rules from this object and routing diagnostics through its
     # logger.
     self.parser = yacc.yacc(module=self,
                             start='launchers',
                             logger=self.logger,
                             **kwdargs)
Beispiel #55
0
def init_parser():
    """Build the lexer and parser once, with optimized cached tables."""
    lex.lex(debug=0, optimize=1)
    yacc.yacc(debug=0, optimize=1)
Beispiel #56
0
 def get_prototype(cls):
     # Lazily build and cache one shared parser per class; the table module
     # is named '<name>tab' so different grammar classes do not collide.
     if not cls.prototype:
         instance = cls()
         tabmodule = '%stab' % cls.name
         cls.prototype = yacc.yacc(module=instance, tabmodule=tabmodule)
     return cls.prototype
Beispiel #57
0
# Send full parser debug output to parselog.txt, truncating it each run.
logging.basicConfig(
    level = logging.DEBUG,
    filename = "parselog.txt",
    filemode = "w",
    format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()

def run_lexer(strings):
    """Feed *strings* to the module lexer and drain every token."""
    lex.input(strings)
    while True:
        tok = lex.token()
        # A falsy token means the input is exhausted.
        if not tok:
            break
def run_parser(strings): 
    # Parse the source, then run the two compiler passes over the AST:
    # symbol-table construction followed by code generation.
    result = parser.parse(strings) 
    SymbolTableVisitor().visit(result) 
    CodeGenVisitor().visit(result) 
    #result.show() 

# Build the lexer and parser at import time.
lex.lex()
parser = yacc.yacc(debug=True)#,debuglog=log)
if __name__ == '__main__':
    # Lex then parse the test program.  NOTE(review): 'file' shadows the
    # Python 2 builtin of the same name, and fout is opened but nothing is
    # ever written to it before it is closed.
    fpath = "test.de" 
    f = open(fpath,"r") 
    fout = open("test.deout","w")
    file=f.read()
    run_lexer(file) 
    run_parser(file)
    f.close()
    fout.close()
Beispiel #58
0
def p_number(t):
    'number : NUMBER'
    # Pass the numeric token through unchanged.
    t[0] = t[1]


def p_type_ref(t):
    'type_ref : UCASE_IDENT'
    # An upper-case identifier names a reference to another type.
    t[0] = Type_Ref(name=t[1])


def p_error(t):
    # Abort the parse; t is the offending token, or None at end of input.
    raise ParseError(str(t))


# Build the parser tables for this grammar at import time.
yacc.yacc()

# XXX should just calculate dependencies as we go along.  Should be part of prepass, not
# a utility function all back-ends have to call.


def calc_dependencies(node, dict, trace=0):
    if not hasattr(node, '__dict__'):
        if trace: print("#returning, node=", node)
        return
    if node.type == 'Type_Ref':  # XXX
        dict[node.name] = 1
        if trace: print("#Setting", node.name)
        return
    for (a, val) in node.__dict__.items():
        if trace: print("# Testing node ", node, "attr", a, " val", val)
Beispiel #59
0
 def buildparser(self, **kwargs):
     # Build this object's PLY parser from its own p_* rules and tokens.
     self.parser = yacc.yacc(module=self, **kwargs)