예제 #1
0
파일: parser.py 프로젝트: lexdene/pavel
    def parse(self, source):
        """Parse *source* and return the resulting tree.

        In debug mode the token stream is dumped first and yacc keeps its
        generated tables; otherwise table writing is suppressed.
        """
        if self._debug:
            self._debug_parse_tokens(source)
            self.__parser = yacc.yacc(module=self)
        else:
            self.__parser = yacc.yacc(
                module=self,
                debug=False,
                write_tables=False
            )

        # Both branches run the parser itself without debug tracing.
        result = self.__parser.parse(
            source,
            lexer=self._create_lexer(),
            debug=0
        )

        if self._debug:
            import pprint
            pprint.pprint(result, indent=4)

        return result
예제 #2
0
def validate(expression):
  """Return True if *expression* parses cleanly, False otherwise."""
  yacc.yacc()
  try:
    yacc.parse(expression)
    return True
  except Exception:
    # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
    # only real errors should mean "invalid expression".
    return False
예제 #3
0
파일: parser.py 프로젝트: dgarant/weasel
    def build_parser(self, debug=False):
        """Build and return a PLY parser for the command grammar.

        The p_* docstrings below are grammar specifications that PLY reads
        at build time; they must not be edited as ordinary documentation.
        """
        tokens = Lexer.tokens

        def p_start(p):
            '''start : put 
                     | exec 
                     | execsh
                     | ping'''
            p[0] = p[1]

        def p_put(p):
            'put : PUT STRING FILESTR'
            p[0] = ('put', p[2], p[3])

        def p_exec(p):
            'exec : EXEC STRING'
            p[0] = ('exec', p[2])

        def p_execsh(p):
            'execsh : EXECSH STRING'
            p[0] = ('execsh', p[2])

        def p_ping(p):
            'ping : PING'
            p[0] = ('ping',)

        # NOTE(review): 'createuser' is not reachable from 'start', so PLY
        # will warn about an unused rule — confirm whether it should be
        # added to the start alternatives.
        def p_createuser(p):
            'createuser : CREATEUSER STRING STRING'
            p[0] = ('createuser', p[2], p[3])

        if debug:
            return yacc.yacc(debug=True)
        else:
            # Suppress yacc's error log in normal (non-debug) builds.
            return yacc.yacc(errorlog = yacc.NullLogger())
예제 #4
0
def main():
    lex.lex()

    cmd, program_file = check_args()

    if program_file is not None and cmd == 'test':
        parser = yacc.yacc()
        with open(program_file) as f:
            input = f.read()
        progcode = parser.parse(input)
        program = Program(progcode)
        program.run_tests()
    elif program_file is not None and cmd == 'lex':
        with open(program_file) as f:
            input = f.read()
        lex.input(input)
        while True:
            tok = lex.token()
            if not tok:
                break
            print tok
    elif program_file is not None:
        parser = yacc.yacc()
        with open(program_file) as f:
            input = f.read()
        progcode = parser.parse(input)
        program = Program(progcode)
        program.call_function('main', [5])
예제 #5
0
 def build_parser(self):
     """Build this object's yacc parser from its grammar rules.

     Uses the instance's debug flag, debug-file name and table module.
     """
     yacc.yacc(module=self,
               debug=self.debug,
               debugfile=self.debugfile,
               tabmodule=self.tabmodule)
예제 #6
0
def compile(filename):
    from parser.preprocess import preprocess
    logger = yacc.NullLogger()
    yacc.yacc()
    data =preprocess(get_input(filename))
    print data
    ast =  yacc.parse(data,lexer = lex.lex(),debug=1)   
예제 #7
0
    def __init__(self, **kw):
        """Build the lexer and parser for this instance.

        Keyword args: debug (default 0), filename (default None).
        """
        self.debug = kw.get('debug', 0)
        self.filename = kw.get('filename', None)
        self.names = { }

        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
        except Exception:
            # __file__ may be unavailable (frozen app, interactive use);
            # fall back to a generic module name. A bare ``except:`` also
            # caught KeyboardInterrupt/SystemExit, which was too broad.
            modname = "parser"+"_"+self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"
        self.outputdir = os.path.dirname(__file__)

        # Build the lexer and parser
        lex.lex(module=self, debug=self.debug)
        # We can't assume that we can write to this directory (e.g., we've
        # been installed as a module). So go ahead and skip writing out. Our
        # grammars are simple enough that this shouldn't be much of a problem,
        # we just regenerate the grammar each time.
        yacc.yacc(module=self, debug=self.debug, write_tables=0, method='SLR')
예제 #8
0
 def scan(self):
     """Build the lexer/parser, reset all shared compiler state, compile
     ``self.entrada`` and return the accumulated build errors."""
     global build_errors,cuadruplos,ids,temp_counter,counter,types,values,pOper,pilaO,braces,pSaltos
     #Lex construction
     import ply.lex as lex
     lex.lex()
     #Sintax construction
     import ply.yacc as yacc
     yacc.yacc()
     # Empty the error list in place so other references observe the reset.
     del build_errors[:]
     #Structure cleaning
     ids.dispatch()
     types.dispatch()
     values.dispatch()
     pOper.dispatch()
     pilaO.dispatch()
     braces.dispatch()
     pSaltos.dispatch()
     cuadruplos.clear()
     #Counters restart
     temp_counter = 0
     counter = 0
     #Compiling entry
     yacc.parse(self.entrada)
     #Return the build error's array or null in case there weren't any
     return build_errors
예제 #9
0
def compile(filename):
    logger = yacc.NullLogger()
    yacc.yacc()
    data =preprocess(get_input(filename))
    print data
    ast =  yacc.parse(data,lexer = lex.lex(),debug=1)   
    return ast
    def __init__(self, filenameargument, **kw):
        """Build lexer/parser state and record the file to be parsed.

        Keyword args: debug (default 0).
        """
        logging.info("Parsing file \"" + str(filenameargument) + "\"")

        ## this block is adapted from the PLY calc.py example
        self.debug = kw.get('debug', 0)
        self.names = { }
        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
        except Exception:
            # __file__ may be unavailable; fall back to a generic name.
            modname = "parser"+"_"+self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"

        # Build the lexer and parser
        lex.lex(module=self, debug=0)
        yacc.yacc(module=self,
                  debug=self.debug,
                  debugfile=self.debugfile,
                  tabmodule=self.tabmodule)

        ## store in lexer object
        logging.debug("initializing seconds to -1")
        self._seconds = -1
        logging.debug("initializing numerrors to 0")
        self._numerrors = 0
        logging.debug("initializing filename to ["+str(filenameargument) + "]")
        self._filename = str(filenameargument)

        # Count lines with a context manager; the original left the file
        # handle open.
        with open(self._filename, 'r') as f:
            self._totalLines = sum(1 for line in f)
예제 #11
0
파일: tree_grammar.py 프로젝트: sbeamer/asp
def parse(tree_grammar, global_dict, checker=None):
    """Parse *tree_grammar* and exec generated AST-node classes into *global_dict*.

    Each grammar rule generates a class; classes mentioned by rules but
    lacking one of their own get a stub subclass. If *checker* is given,
    a checker class is generated as well.
    """
    import ply.yacc as yacc
    yacc.yacc()
    result = yacc.parse(tree_grammar)

    parent_map = defaultdict(lambda: 'ast.AST')
    for rule in result:
        rule_map = rule.get_parent_map()
        assert len(set(parent_map.keys()) & set(rule_map.keys())) == 0, 'Same class occured in two alternative rules, but can only have one base class'
        parent_map.update(rule_map)

    program = "import copy\n"

    classes_with_rules = []
    all_classes = []
    for rule in result:
        classes_with_rules.append(rule.name)
        all_classes.extend(rule.get_classes())
    # Qualified names (containing '.') refer to existing classes; only
    # plain names need generated definitions.
    all_classes = {x for x in all_classes if "." not in x}
    classes_with_rules = set(classes_with_rules)

    for rule in result:
        program += rule.generate(parent_map, all_classes)

    for x in all_classes - classes_with_rules:
        program += '''
class %s(%s):
    def __init__(self):
        super(%s, self).__init__()
''' % (x, parent_map[x], x)

    if checker is not None:
        program = "import ast\n" + program + "\n" + generate_checker_class(checker, classes_with_rules) + "\n"

    # NOTE: the generated source is produced locally from the grammar, but
    # exec of assembled strings is still worth reviewing.
    exec(program, global_dict)
예제 #12
0
 def __init__(self, loglevel=logging.CRITICAL, logfile=''):
     """Set up logging, the reserved-word map, the lexer and the parser."""
     self._log = self.initLogger(loglevel, logfile)
     # Map RESERVED -> reserved for keyword lookup during lexing.
     self._reserved_map = {r.upper(): r for r in RuleParser.reserved}
     lex.lex(module=self)
     yacc.yacc(module=self)
예제 #13
0
 def __init__(self, **kw):
     """Build this instance's lexer and parser.

     Keyword args: debug (default 0). Parse results are collected in
     ``self.results``.
     """
     self.debug = kw.get('debug', 0)
     self.results = {}
     try:
         modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
     except Exception:
         # __file__ may be unavailable; fall back to a generic name.
         # (The bare ``except:`` was too broad.)
         modname = "parser"+"_"+self.__class__.__name__
     self.tabmodule = modname + "_" + "parsetab"
     # Build the lexer (optimized, no debug) and the parser (no table
     # files written).
     lex.lex(
             module=self,
             optimize=True,
             debug=False
             )
     yacc.yacc(
             module=self,
             debug=False,
             write_tables=False,
             tabmodule=self.tabmodule
             )
예제 #14
0
def parse_file_to_ast(file_path):
  """Parse *file_path* (relative to the file currently being parsed) and
  return its AST, or [] if it was already parsed."""
  if len(queue) > 0:
    current_dir = os.path.dirname(queue[-1])
  else:
    current_dir = ""

  full_path_string = os.path.abspath(os.path.join(current_dir, file_path))

  # Each file is parsed at most once.
  if full_path_string in parsed_set:
    return []

  parsed_set.add(full_path_string)
  queue.append(full_path_string)

  import ply.lex as lex
  import ply.yacc as yacc

  lex.lex(nowarn=1)
  yacc.yacc(debug=False, tabmodule="_preprocessor", outputdir=ODIR)

  # Use a context manager so the handle is closed even if read() fails.
  with open(full_path_string, 'r') as reader:
    input_string = reader.read()
  ast = yacc.parse(input_string)

  update_cleanup()
  return ast
예제 #15
0
    def __init__(self, line, **kw):
        """Parse *line*, building the search tree and counting cases.

        Keyword args: debug (default 0).
        """
        self.debug = kw.get('debug', 0)
        self.line = line
        self.searchtree = []
        self.numcases = 1

        try:
            modname = os.path.split(os.path.splitext(__file__)[0])[1] + "_" + self.__class__.__name__
        except Exception:
            # __file__ may be unavailable; fall back to a generic name.
            # (The bare ``except:`` was too broad.)
            modname = "parser"+"_"+self.__class__.__name__
        self.debugfile = modname + ".dbg"
        self.tabmodule = modname + "_" + "parsetab"

        # Build the lexer and parser
        lex.lex(module=self, debug=self.debug)
        yacc.yacc(module=self,
                  debug=self.debug,
                  debugfile=self.debugfile,
                  tabmodule=self.tabmodule)

        yacc.parse(self.line)

        # Total search-space size is the product of all subspace sizes.
        for s in self.searchtree:
            if isinstance(s, SearchSubSpace):
                self.numcases *= s.size
예제 #16
0
    def build_parser( cls, start_symbol, debug):
        """Build a yacc parser for *start_symbol*.

        All parse logging goes to /tmp/mflog/parselog.txt; generated
        tables are cached under a per-user /tmp directory. Non-debug
        builds silence yacc's error log.
        """
        import logging
        if not os.path.exists('/tmp/mflog'):
            os.makedirs('/tmp/mflog')
        logging.basicConfig(
            level = logging.DEBUG,
            filename = "/tmp/mflog/parselog.txt",
            filemode = "w",
            format = "%(filename)10s:%(lineno)4d:%(message)s"
        )
        log = logging.getLogger()



        # NOTE(review): '******' looks like a scrubbed format string — as
        # written, '%' on a string with no conversion specifier raises
        # TypeError. Presumably this was something like "user%d" %
        # os.getuid(); confirm against the original project.
        username = '******' % os.getuid()
        tables_loc =  EnsureExisits("/tmp/%s/nu/yacc/parse_eqn_block" % username)

        if debug:
            parser = yacc.yacc( debug=debug, start=start_symbol,  tabmodule="neurounits_parsing_parse_eqn_block", outputdir=tables_loc,optimize=1  )
        else: 
            parser = yacc.yacc( debug=debug, start=start_symbol,  tabmodule="neurounits_parsing_parse_eqn_block", outputdir=tables_loc,optimize=1, errorlog=ply.yacc.NullLogger()  )

        #with open("/tmp/neurounits_grammar.txt",'w') as f:
        #    for p in parser.productions:
        #        f.write( "%s\n" %p)



        return parser
예제 #17
0
def leer_archivo_yacc():
    '''
    Opens the file and reads it line by line, feeding the character
    strings of each line into the input of the lexical analyzer.
    '''

    # Build the parser using the LALR method.
    yacc.yacc(method='LALR')

    # raw_input: this module targets Python 2.
    s = raw_input(">> ")

    with open('archivos_prueba_parser/'+s+'.txt') as linea:
        datos = linea.read()
        raiz = yacc.parse(datos)
        raiz.imprimir(' ')


    '''
    Loop infinido que muestra los token's reconocidos por
    el analizador sintactico dando informacion de:
    tipo de token
    valor del token
    fila donde fue leido
    columna donde fue leido
    cuando no encuentra mas token's en la entrada termina
    la iteracion
    '''
    '''while True:
예제 #18
0
파일: miniep5.py 프로젝트: andredalton/bcc
 def __init__(self, transmite_erros=False):
     """Initializer; pass True to propagate errors instead of handling them."""
     # When run from the command line, errors are re-raised rather than
     # handled in place.
     self.transmite_erros = transmite_erros
     self.resposta = None   # last computed answer
     self.names = { }       # variable names
     lex.lex(module=self)
     yacc.yacc(module=self)
예제 #19
0
def xml_parse(data):
    """Lex and parse the XML string *data*; return the parse-tree root.

    Debug hooks trace the input, lexing, parsing and output stages.
    """
    _debug_header('INPUT')
    _debug_print_('INPUT', data)
    _debug_footer('INPUT')

    # Tokenizer
    xml_lexer = XmlLexer()
    xml_lexer.build()

    _debug_header('LEXER')
    xml_lexer.test(data)
    _debug_footer('LEXER')

    # Parser
    # The p_* rules pick `tokens` up from this module's namespace.
    global tokens
    tokens = XmlLexer.tokens

    yacc.yacc(method="SLR")

    _debug_header('PARSER')
    root = yacc.parse(data, lexer=xml_lexer.lexer, debug=False)
    _debug_footer('PARSER')

    _debug_header('OUTPUT')
    _debug_print_('OUTPUT', root)
    _debug_footer('OUTPUT')

    return root
예제 #20
0
파일: Induce.py 프로젝트: gageholden/Pyam
def terminal():
    """Handle an invocation of Induce from the terminal or main function.

    Command-line switches toggle entries in the global outputSettings;
    afterwards the parser is built and a read-eval loop runs until EOF.
    """
    opts, detupler = getopt.getopt(sys.argv[1:], "nhmgl:r:s:", ["node", "hist",\
    "match", "gen", "learningrate=", "rounds="])

    global outputSettings

    # Each switch pair simply flips one output setting on.
    switch_table = (
        (("-n", "--node"), 'node'),
        (("-h", "--hist"), 'hist'),
        (("-m", "--match"), 'match'),
        (("-g", "--gen"), 'gen'),
    )
    for o, a in opts:
        for keys, setting in switch_table:
            if o in keys:
                outputSettings[setting] = True

    import ply.yacc as yacc
    yacc.yacc()

    while True:
        try:
            s = read_line("> ")
        except EOFError:
            break
        if not s:
            continue
        yacc.parse(s)
예제 #21
0
파일: parse.py 프로젝트: odkq/zerodoc
def parse(s):
    ''' Return the syntax tree for a preloaded string '''
    l = lex.lex(optimize=1, debug=0)
    yacc.yacc(optimize=1, debug=0)
    p, e = preprocess(s)
    # `is None` instead of `== None`: identity test for the None singleton.
    if p is None:
        print 'ERROR: ' + e
        return None
    doc = yacc.parse(p)
    # First, adjust 'named' sections from the template
    adjust_sections(doc)
    # Extract links from source and put them in their tree
    process_links(doc)
    # Identify text attributes
    extract_attributes(doc)
    # Parsetab by default is generated on the current directory.
    # This is not desirable at all (the directory can be read-only
    # and a program should not write spurious files on its cwd).
    # Until a better place for the parsetab is found (the installation
    # dir would be best), just remove it.
    if os.path.exists('parsetab.py'):
        os.remove('parsetab.py')
    if os.path.exists('lextab.py'):
        os.remove('lextab.py')
    return doc, None
예제 #22
0
def parseFPGAEnvironment (environmentFile):
    """Build the lexer/parser and return the parsed FPGA environment."""
    # build the compiler
    lex.lex()
    yacc.yacc()
    # Read via a context manager; the original open() leaked the handle.
    with open(environmentFile, 'r') as f:
        environmentDescription = f.read()
    environment = yacc.parse(environmentDescription)
    return environment
예제 #23
0
def textEditorParseMe(filename):
    
    tokens = ['FILENAME', 'NUMBERSEQUENCE']
    
    def t_FILENAME(t):
        r'[a-zA-Z_/.][a-zA-Z0-9_/.]*'
        return t
    
    def t_NUMBERSEQUENCE(t):
        r'[0-9 :]+'
        return t
        
    t_ignore = '\t: '
    
    def t_newline(t):
        r'\n+'
        t.lexer.lineno += t.value.count("\n")
        
    def t_error(t):
        print "Illegal character '%s'" % t.value[0]
        t.lexer.skip(1)
        
    lex.lex()
    
    count = []
    latency = []
    organized = {}
    
    def p_sentence(p):
      '''sentence : FILENAME NUMBERSEQUENCE'''
      tmp1 = []
      tmp = p[2].split(':')
      for x in tmp:
        x = x.strip()
        tmp1.append(x)
      organized[int(tmp1[0])] = tmp1[1].split(' ')
          
          
        
        
        

    def p_error(p):
      if p:
          print("Syntax error at '%s'" % p.value)
      else:
          print("Syntax error at EOF")

        
    yacc.yacc()
    
    file = open(filename, 'r')
    while file:
        line = file.readline()
        if not line : break
        yacc.parse(line[0:-1])
        
        
    return organized
예제 #24
0
def parse(stringToParse):
	"""Build the grammar's parser and return the tree for *stringToParse*."""
	yacc.yacc()
	tree = yacc.parse(stringToParse)
	return tree

#string1 = "i := 5; define testfunc proc (n) return := n - 3 end; q := testfunc(i)"
#string1 = "i := 5; n := i * 3"

#compile(string1)
예제 #25
0
파일: msr_parser.py 프로젝트: sllam/msre-py
def run_parser(file):
	"""Lex and parse the file at path *file*; return (source_text, ast)."""
	lex.lex()
	yacc.yacc()
	# Read within a context manager so the handle is closed promptly
	# (the original open() was never closed).
	with open(file) as f:
		input = f.read()
	ast = yacc.parse(input, tracking=True)
	return (input, ast)
 def __init__(self):
     """Create arithmetic- and boolean-expression parsers over BIRLexer tokens."""
     self.lexer = BIRLexer()
     self.tokens = self.lexer.tokens
     # Both parsers share the same grammar module but start at different
     # symbols; the instruction parser additionally silences yacc's log.
     shared = dict(module=self, write_tables=0, debug=False)
     self.inst_parser = yacc.yacc(start='arith_exp',
                                  errorlog=yacc.NullLogger(), **shared)
     self.cond_parser = yacc.yacc(start='bool_exp', **shared)
예제 #27
0
def compile(filename):
    """Read *filename*, strip '#' comments, parse, and return the AST.

    A sentinel start-of-file marker line is prepended before parsing.
    (The unused NullLogger local has been removed.)
    """
    yacc.yacc()
    data = "<<cLkingshuk_STARTFILE_dontUseThisVariable2731990>>" + '\n' + (get_input(filename).strip('\n').strip())
    # Strip '#' comments: mid-file comments keep their newline, a trailing
    # comment at end-of-input is removed outright.
    data = re.sub(r'#.*?\n', r'\n', data)
    data = re.sub(r'#.*?\Z', '', data)
    ast = yacc.parse(data, lexer=lex.lex(), debug=1)
    return ast
예제 #28
0
def Parse(source, filename):
  """Build lexer/parser objects and return the syntax tree for *source*."""
  lexer = Lexer(filename)
  parser = Parser(lexer, source, filename)

  lex.lex(object=lexer)
  yacc.yacc(module=parser, debug=0, write_tables=0)

  return yacc.parse(source)
예제 #29
0
def Parse(filename):
  """Parse the contents of *filename* and return the syntax tree."""
  lexer = Lexer()
  parser = Parser(lexer)

  lex.lex(object=lexer)
  yacc.yacc(module=parser, debug=0, write_tables=0)

  # Read with a context manager; the original open() leaked the handle.
  with open(filename) as f:
    source = f.read()
  return yacc.parse(source)
예제 #30
0
파일: tgrep.py 프로젝트: Oneplus/cnccgbank
def initialise():
    '''Performs lazy initialisation of the lexer and parser. Once called, further calls are no-ops.'''
    global _tgrep_initialised

    # Guard clause: nothing to do once initialised.
    if _tgrep_initialised:
        return

    lex.lex(module=parse)
    yacc.yacc(module=parse)
    _tgrep_initialised = True
import sys

from constants import *
from statics import findn
import turtlebot_instructions as turtlebot

import traceback

import time

import tf
import publisher

# Module-level lexer and parser, built once at import time from the
# instruction-graph token and grammar modules.
lexer = lex.lex(module=lexerIG)
parser = yacc.yacc(module=parserIG)


class CancelTracker(object):
	"""One-way latch recording whether an operation has been canceled."""

	def __init__(self):
		self._canceled = False

	def is_canceled(self):
		"""Return True once cancel() has been called."""
		return self._canceled

	def cancel(self):
		"""Mark this tracker as canceled; the latch cannot be reset."""
		self._canceled = True

class IGServer(object):
	_feedback = ig_action_msgs.msg.InstructionGraphFeedback()
예제 #32
0
 def build(self, **kwargs):
     """Create this module's lexer, then its yacc parser (kwargs go to yacc)."""
     new_lexer = Lexer()
     new_lexer.build()
     self.lexer = new_lexer
     self.parser = yacc.yacc(module=self, **kwargs)
예제 #33
0
    ' ex : ex MUL ex '
    p[0] = p[2]
    p[0] += p[1]
    p[0] += p[3]


def p_ex_DIV(p):
    ' ex : ex DIV ex '
    # Emit prefix notation: operator first, then left and right operands.
    result = p[2]
    result += p[1]
    result += p[3]
    p[0] = result


def p_ex_POW(p):
    ' ex : ex POW ex '
    # Emit prefix notation: operator first, then left and right operands.
    result = p[2]
    result += p[1]
    result += p[3]
    p[0] = result


def t_error(t):
    # Lexer error hook: report the offending token. (Python 2 print.)
    print 'lexer/error', t


def p_error(p):
    # Parser error hook: report the offending token. (Python 2 print.)
    print 'parse/error', p


# Build the lexer and parser (no debug output, no table files) and parse
# everything available on stdin in one shot.
lex.lex()
yacc.yacc(debug=False, write_tables=False).parse(sys.stdin.read())
예제 #34
0
    p[0] = RME(And(), [], And())


from ivy_logic_parser import *


def p_error(token):
    # yacc error hook: record a ParseError for the offending token, or
    # for unexpected end of input when token is None.
    if token is None:
        report_error(ParseError(None, None, 'unexpected end of input'))
    else:
        report_error(ParseError(token.lineno, token.value, "syntax error"))


# Build the module-level parser: start symbol 'top', cached tables in
# 'ivy_parsetab', and all yacc diagnostics suppressed via NullLogger.
parser = yacc.yacc(start='top',
                   tabmodule='ivy_parsetab',
                   errorlog=yacc.NullLogger())
# Alternative builds kept for reference:
#parser = yacc.yacc(start='top',tabmodule='ivy_parsetab')
# formula_parser = yacc.yacc(start = 'fmla', tabmodule='ivy_formulatab')


def parse(s, nested=False):
    global error_list
    global stack
    if not nested:
        error_list = []
        stack = []
    vernum = iu.get_numeric_version()
    with LexerVersion(vernum):
        # shallow copy the parser and lexer to try for re-entrance (!!!)
        res = copy.copy(parser).parse(s, lexer=copy.copy(lexer))
예제 #35
0
    header = '<thead>' + p[2] + '</thead>\n'
    body = '<tbody>' + p[3] + '</tbody>\n'
    p[0] = '<informaltable><tgroup cols="' + str(
        len(colfrac
            )) + '">' + colspec + header + body + '</tgroup></informaltable>'
    parser_verbose(p)


def p_error(t):
    # Report the parse error with line and token context, then abort.
    message = ('parse error at line %d, token %s, next token %s' %
               (t.lineno, t, parser.token()))
    print(message, file=sys.stderr)
    exit(1)


# Module-level parser with the explicit start symbol 'input'.
parser = yacc.yacc(start='input')

#
#
#


def main(file):
    content = file.read()
    content = remove_noncomments(content)
    processed = process(content)
    perform(processed)

    # output the XML tree
    s = lxml.etree.tostring(rootelement, pretty_print=True)
예제 #36
0
def make_parser():
    """Build and return a yacc parser for this module's grammar rules."""
    return yacc.yacc()
예제 #37
0
파일: gas.py 프로젝트: xorpse/amoco
 def build(self, **kargs):
     """Build the parser if PLY is available; defaults: no debug, no tables."""
     options = dict(debug=0, write_tables=0)
     options.update(**kargs)
     if _has_ply:
         self._parser = yacc.yacc(module=self, **options)
예제 #38
0
import ply.lex as lex
import ply.yacc as yacc
from Web import graphics as graphics
from Web import jstokens
from Web import jsgrammar
from Web import jsinterp
from Web import htmltokens
from Web import htmlgrammar

# Load up the lexers and parsers that you have already written in
# previous assignments. Do not worry about the "module" or
# "tabmodule" arguments -- they handle storing the JavaScript
# and HTML rules separately.
# Build the HTML and JavaScript lexers/parsers once at import time;
# separate tabmodule names keep their cached tables from colliding.
htmllexer = lex.lex(module=htmltokens)
htmlparser = yacc.yacc(module=htmlgrammar, tabmodule="parsetabhtml")
jslexer = lex.lex(module=jstokens)
jsparser = yacc.yacc(module=jsgrammar, tabmodule="parsetabjs")


# The heart of our browser: recursively interpret an HTML abstract
# syntax tree.
def interpret(ast):  # 用来解释语法树的一个玩意
    for node in ast:
        nodetype = node[0]  # node的类型
        if nodetype == "word-element":
            graphics.word(node[1])
        elif nodetype == "tag-element":
            tagname = node[1]
            # tag的名字
            tagargs = node[2]
예제 #39
0
def p_dlt_conditions(p):
    'dlt_conditions : conditions'
    # Record the line number of the first symbol for later diagnostics.
    p[0] = Conditions(p.lineno(1), p[1])


def p_dlt_actions(p):
    'dlt_actions : actions'
    # Unlike dlt_conditions, no line number is recorded here.
    p[0] = Actions(p[1])


def p_error(t):
    # yacc error hook: log the token, then raise via syntax_error.
    print("p_error", t)
    syntax_error("p_error: Syntax Error", t.lexpos, t.lineno)


# Build the module-level parser; the explicit start symbol is disabled.
parser = yacc.yacc()  # (start='opmode')


def parse_opmode(filename, debug=False):
    global Rootdir

    full_name = os.path.abspath(filename)
    dirname = os.path.dirname(full_name)
    path = [dirname]
    Rootdir = os.path.dirname(dirname)
    libsdir = os.path.join(Rootdir, "libs")
    if os.path.isdir(libsdir):
        for entry in os.scandir(libsdir):
            if entry.is_dir():
                path.append(entry.path)
    #print("path", path)
예제 #40
0
파일: parser.py 프로젝트: cheaterok/Stampa
    p[0] = ''


def p_arg_int(p):
    """
    arg : INT
    """
    # INT lexemes arrive as text; expose them as Python ints.
    value = int(p[1])
    p[0] = value


def p_arg_float(p):
    """
    arg : FLOAT
    """
    # FLOAT lexemes arrive as text; expose them as Python floats.
    value = float(p[1])
    p[0] = value


def p_arg_other(p):
    """
    arg : STR
        | funccall
    """
    # Strings and nested function-call results pass through unchanged.
    value = p[1]
    p[0] = value


def p_error(p):
    # yacc error hook: p is the offending token, or None at end of input.
    print('Unexpected token:', p)


# Build the module-level parser from the p_* rules above.
parser = yacc.yacc()
예제 #41
0
def p_empty(p):
    '''empty : '''
    # Matches nothing; makes optional grammar slots explicit.


# Catastrophic error handler


def p_error(p):
    # yacc error hook: report the bad token, or EOF when p is None.
    if p:
        print("Syntax error at token", p.type, "line", p.lineno, ":", p.value)
    else:
        print("SYNTAX ERROR AT EOF")


# Build the parser without writing table files or debug output.
parser = yacc.yacc(tabmodule='asterism_parsetab',
                   write_tables=False,
                   debug=False)


def parse(data, debug=0):
    """Parse *data* with the module parser; return None if p_error fired."""
    parser.error = 0
    tree = parser.parse(data, lexer=lexer, debug=debug)
    return None if parser.error else tree


def create_asterism(universe, name, text_segments):
    segments = []
    for text_segment in text_segments:
        segment = []
예제 #42
0
파일: main.py 프로젝트: tincho4t/tleng
                    % (p.value, s[0:p.lineno], s[p.lineno], s[p.lineno+1:])
            else:
                error = "Error en el caracter '%s'. Contexto: '%s'" \
                    % (p.value, s[0:p.lineno])
        else:
            error = "Error en el caracter '%s' en la posicion %d." \
                % (p.value, p.lineno)
    else:  # en algunos casos p no viene definido y no hay mucha mas
        #  informacion para mostrar
        error = "Error de sintaxis."
    raise SyntaxError(error)


import ply.yacc as yacc

yacc.yacc()

#==========================================
#
# TESTS
#
#==========================================


def test():
    # Casos que tiene que tiene que reconocer
    assert test_accept('(a_5-c/b-1)-c')
    assert test_accept('{a^5-c/b}-c')
    assert test_accept('{a^{5^6}-c_{{k^9}}/b_i}-c')
    assert test_accept('(10+5/2)')
    assert test_accept('1_2^{3_4^{5_6^7}}')
예제 #43
0
    '''vector : LPAREN NUMBER COMMA NUMBER COMMA NUMBER COMMA NUMBER RPAREN
	          | LPAREN NUMBER COMMA NUMBER COMMA NUMBER RPAREN'''
    if len(p) == 10:
        p[0] = '(' + str(p[2]) + ' ' + str(p[4]) + ' ' + str(p[6]) + ' ' + str(
            p[8]) + ')'
    else:
        p[0] = '(' + str(p[2]) + ' ' + str(p[4]) + ' ' + str(p[6]) + ')'


def p_texture(p):
    '''texture : STRING LBRACE RBRACE'''
    # A texture reduces to its name; the empty braces are discarded.
    name = p[1]
    p[0] = name


# an empty production, makes rules clearer
# def p_empty(p):
# 	'empty :'
# 	pass


def p_error(p):
    # yacc error hook: p is the offending token, or None at end of input.
    print("Syntax error in input! {}".format(p))


# Build the parser
parser = yacc.yacc(debug=False)


def parse(data):
    """Run the module-level parser over *data* and return its result."""
    result = parser.parse(data)
    return result
예제 #44
0
파일: parser.py 프로젝트: weshayutin/hubtty
def SearchParser():
    """Build and return a PLY parser for hubtty's change-search language.

    Each grammar production reduces to a SQLAlchemy boolean expression
    over hubtty's tables, so the parse result can be dropped directly
    into a query's WHERE clause.  The rule docstrings below ARE the
    grammar — do not edit them casually.

    NOTE(review): several rules read ``p.parser.account_id``, so callers
    apparently set ``account_id`` on the returned parser before use —
    confirm against the call sites.
    """
    precedence = (  # NOQA
        ('left', 'NOT', 'NEG'), )

    def p_terms(p):
        '''expression : list_expr
                      | paren_expr
                      | boolean_expr
                      | negative_expr
                      | term'''
        p[0] = p[1]

    def p_list_expr(p):
        '''list_expr : expression expression'''
        # Juxtaposed expressions are implicitly AND-ed, as in Gerrit.
        p[0] = and_(p[1], p[2])

    def p_paren_expr(p):
        '''paren_expr : LPAREN expression RPAREN'''
        p[0] = p[2]

    def p_boolean_expr(p):
        '''boolean_expr : expression AND expression
                        | expression OR expression'''
        if p[2].lower() == 'and':
            p[0] = and_(p[1], p[3])
        elif p[2].lower() == 'or':
            p[0] = or_(p[1], p[3])
        else:
            raise hubtty.search.SearchSyntaxError("Boolean %s not recognized" %
                                                  p[2])

    def p_negative_expr(p):
        '''negative_expr : NOT expression
                         | NEG expression'''
        p[0] = not_(p[2])

    def p_term(p):
        '''term : age_term
                | recentlyseen_term
                | change_term
                | owner_term
                | reviewer_term
                | commit_term
                | project_term
                | projects_term
                | project_key_term
                | branch_term
                | ref_term
                | label_term
                | message_term
                | comment_term
                | has_term
                | is_term
                | status_term
                | file_term
                | path_term
                | limit_term
                | op_term'''
        p[0] = p[1]

    def p_string(p):
        '''string : SSTRING
                  | DSTRING
                  | USTRING'''
        p[0] = p[1]

    def p_age_term(p):
        '''age_term : OP_AGE NUMBER string'''
        # age:N <unit> — matches changes not updated within the window.
        now = datetime.datetime.utcnow()
        delta = p[2]
        unit = p[3]
        delta = age_to_delta(delta, unit)
        p[0] = hubtty.db.change_table.c.updated < (
            now - datetime.timedelta(seconds=delta))

    def p_recentlyseen_term(p):
        '''recentlyseen_term : OP_RECENTLYSEEN NUMBER string'''
        # A hubtty extension
        # Matches changes seen within <delta> of the most recent view.
        delta = p[2]
        unit = p[3]
        delta = age_to_delta(delta, unit)
        s = select([
            func.datetime(func.max(hubtty.db.change_table.c.last_seen),
                          '-%s seconds' % delta)
        ],
                   correlate=False)
        p[0] = hubtty.db.change_table.c.last_seen >= s

    def p_change_term(p):
        '''change_term : OP_CHANGE CHANGE_ID
                       | OP_CHANGE NUMBER'''
        # Numeric argument matches the change number; otherwise the hash id.
        if type(p[2]) == int:
            p[0] = hubtty.db.change_table.c.number == p[2]
        else:
            p[0] = hubtty.db.change_table.c.change_id == p[2]

    def p_owner_term(p):
        '''owner_term : OP_OWNER string'''
        # "self" resolves to the configured account; anything else matches
        # username, email, or display name.
        if p[2] == 'self':
            account_id = p.parser.account_id
            p[0] = hubtty.db.account_table.c.id == account_id
        else:
            p[0] = or_(hubtty.db.account_table.c.username == p[2],
                       hubtty.db.account_table.c.email == p[2],
                       hubtty.db.account_table.c.name == p[2])

    def p_reviewer_term(p):
        '''reviewer_term : OP_REVIEWER string
                         | OP_REVIEWER NUMBER'''
        # Matches changes having an approval row from the given account
        # (by numeric id, "self", or username/email/name).
        filters = []
        filters.append(hubtty.db.approval_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.approval_table.c.account_key ==
                       hubtty.db.account_table.c.key)
        try:
            number = int(p[2])
        except:
            number = None
        if number is not None:
            filters.append(hubtty.db.account_table.c.id == number)
        elif p[2] == 'self':
            account_id = p.parser.account_id
            filters.append(hubtty.db.account_table.c.id == account_id)
        else:
            filters.append(
                or_(hubtty.db.account_table.c.username == p[2],
                    hubtty.db.account_table.c.email == p[2],
                    hubtty.db.account_table.c.name == p[2]))
        s = select([hubtty.db.change_table.c.key],
                   correlate=False).where(and_(*filters))
        p[0] = hubtty.db.change_table.c.key.in_(s)

    def p_commit_term(p):
        '''commit_term : OP_COMMIT string'''
        filters = []
        filters.append(hubtty.db.revision_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.revision_table.c.commit == p[2])
        s = select([hubtty.db.change_table.c.key],
                   correlate=False).where(and_(*filters))
        p[0] = hubtty.db.change_table.c.key.in_(s)

    def p_project_term(p):
        '''project_term : OP_PROJECT string'''
        # A leading "^" switches to regex matching, per Gerrit convention.
        if p[2].startswith('^'):
            p[0] = func.matches(p[2], hubtty.db.project_table.c.name)
        else:
            p[0] = hubtty.db.project_table.c.name == p[2]

    def p_projects_term(p):
        '''projects_term : OP_PROJECTS string'''
        # Prefix match over project names.
        p[0] = hubtty.db.project_table.c.name.like('%s%%' % p[2])

    def p_project_key_term(p):
        '''project_key_term : OP_PROJECT_KEY NUMBER'''
        p[0] = hubtty.db.change_table.c.project_key == p[2]

    def p_branch_term(p):
        '''branch_term : OP_BRANCH string'''
        if p[2].startswith('^'):
            p[0] = func.matches(p[2], hubtty.db.change_table.c.branch)
        else:
            p[0] = hubtty.db.change_table.c.branch == p[2]

    def p_ref_term(p):
        '''ref_term : OP_REF string'''
        # Refs are stored as bare branch names; strip/add the
        # "refs/heads/" prefix to compare.
        if p[2].startswith('^'):
            p[0] = func.matches(
                p[2], 'refs/heads/' + hubtty.db.change_table.c.branch)
        else:
            p[0] = hubtty.db.change_table.c.branch == p[2][len('refs/heads/'):]

    # Parses "label:<name><op><value>[,user=<who>]", e.g. label:Code-Review>=1.
    label_re = re.compile(
        r'(?P<label>[a-zA-Z0-9_-]+([a-zA-Z]|((?<![-+])[0-9])))'
        r'(?P<operator>[<>]?=?)(?P<value>[-+]?[0-9]+)'
        r'($|,(user=)?(?P<user>\S+))')

    def p_label_term(p):
        '''label_term : OP_LABEL string'''
        args = label_re.match(p[2])
        label = args.group('label')
        op = args.group('operator') or '='
        value = int(args.group('value'))
        user = args.group('user')

        filters = []
        filters.append(hubtty.db.approval_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.approval_table.c.category == label)
        if op == '=':
            filters.append(hubtty.db.approval_table.c.value == value)
        elif op == '>=':
            filters.append(hubtty.db.approval_table.c.value >= value)
        elif op == '<=':
            filters.append(hubtty.db.approval_table.c.value <= value)
        if user is not None:
            filters.append(hubtty.db.approval_table.c.account_key ==
                           hubtty.db.account_table.c.key)
            if user == 'self':
                filters.append(
                    hubtty.db.account_table.c.id == p.parser.account_id)
            else:
                filters.append(
                    or_(hubtty.db.account_table.c.username == user,
                        hubtty.db.account_table.c.email == user,
                        hubtty.db.account_table.c.name == user))
        s = select([hubtty.db.change_table.c.key],
                   correlate=False).where(and_(*filters))
        p[0] = hubtty.db.change_table.c.key.in_(s)

    def p_message_term(p):
        '''message_term : OP_MESSAGE string'''
        # Substring match over commit messages.
        filters = []
        filters.append(hubtty.db.revision_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.revision_table.c.message.like('%%%s%%' %
                                                               p[2]))
        s = select([hubtty.db.change_table.c.key],
                   correlate=False).where(and_(*filters))
        p[0] = hubtty.db.change_table.c.key.in_(s)

    def p_comment_term(p):
        '''comment_term : OP_COMMENT string'''
        # Exact match against either a revision message or an inline comment.
        filters = []
        filters.append(hubtty.db.revision_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.revision_table.c.message == p[2])
        revision_select = select([hubtty.db.change_table.c.key],
                                 correlate=False).where(and_(*filters))
        filters = []
        filters.append(hubtty.db.revision_table.c.change_key ==
                       hubtty.db.change_table.c.key)
        filters.append(hubtty.db.comment_table.c.revision_key ==
                       hubtty.db.revision_table.c.key)
        filters.append(hubtty.db.comment_table.c.message == p[2])
        comment_select = select([hubtty.db.change_table.c.key],
                                correlate=False).where(and_(*filters))
        p[0] = or_(hubtty.db.change_table.c.key.in_(comment_select),
                   hubtty.db.change_table.c.key.in_(revision_select))

    def p_has_term(p):
        '''has_term : OP_HAS string'''
        #TODO: implement star
        if p[2] == 'draft':
            filters = []
            filters.append(hubtty.db.revision_table.c.change_key ==
                           hubtty.db.change_table.c.key)
            filters.append(hubtty.db.message_table.c.revision_key ==
                           hubtty.db.revision_table.c.key)
            filters.append(hubtty.db.message_table.c.draft == True)
            s = select([hubtty.db.change_table.c.key],
                       correlate=False).where(and_(*filters))
            p[0] = hubtty.db.change_table.c.key.in_(s)
        else:
            raise hubtty.search.SearchSyntaxError(
                'Syntax error: has:%s is not supported' % p[2])

    def p_is_term(p):
        '''is_term : OP_IS string'''
        #TODO: implement draft
        account_id = p.parser.account_id
        if p[2] == 'reviewed':
            filters = []
            filters.append(hubtty.db.approval_table.c.change_key ==
                           hubtty.db.change_table.c.key)
            filters.append(hubtty.db.approval_table.c.value != 0)
            s = select([hubtty.db.change_table.c.key],
                       correlate=False).where(and_(*filters))
            p[0] = hubtty.db.change_table.c.key.in_(s)
        elif p[2] == 'open':
            p[0] = hubtty.db.change_table.c.status.notin_(
                ['MERGED', 'ABANDONED'])
        elif p[2] == 'closed':
            p[0] = hubtty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
        elif p[2] == 'submitted':
            p[0] = hubtty.db.change_table.c.status == 'SUBMITTED'
        elif p[2] == 'merged':
            p[0] = hubtty.db.change_table.c.status == 'MERGED'
        elif p[2] == 'abandoned':
            p[0] = hubtty.db.change_table.c.status == 'ABANDONED'
        elif p[2] == 'owner':
            p[0] = hubtty.db.account_table.c.id == account_id
        elif p[2] == 'starred':
            p[0] = hubtty.db.change_table.c.starred == True
        elif p[2] == 'held':
            # A hubtty extension
            p[0] = hubtty.db.change_table.c.held == True
        elif p[2] == 'reviewer':
            filters = []
            filters.append(hubtty.db.approval_table.c.change_key ==
                           hubtty.db.change_table.c.key)
            filters.append(hubtty.db.approval_table.c.account_key ==
                           hubtty.db.account_table.c.key)
            filters.append(hubtty.db.account_table.c.id == account_id)
            s = select([hubtty.db.change_table.c.key],
                       correlate=False).where(and_(*filters))
            p[0] = hubtty.db.change_table.c.key.in_(s)
        elif p[2] == 'watched':
            p[0] = hubtty.db.project_table.c.subscribed == True
        else:
            raise hubtty.search.SearchSyntaxError(
                'Syntax error: is:%s is not supported' % p[2])

    def p_file_term(p):
        '''file_term : OP_FILE string'''
        # NOTE(review): "c.status is not None" is evaluated by Python, not
        # SQL, and is always True for a Column object — likely intended to
        # be "c.status != None"; confirm before changing query behavior.
        if p[2].startswith('^'):
            p[0] = and_(
                or_(func.matches(p[2], hubtty.db.file_table.c.path),
                    func.matches(p[2], hubtty.db.file_table.c.old_path)),
                hubtty.db.file_table.c.status is not None)
        else:
            file_re = '(^|.*/)%s(/.*|$)' % re.escape(p[2])
            p[0] = and_(
                or_(func.matches(file_re, hubtty.db.file_table.c.path),
                    func.matches(file_re, hubtty.db.file_table.c.old_path)),
                hubtty.db.file_table.c.status is not None)

    def p_path_term(p):
        '''path_term : OP_PATH string'''
        # Like file_term, but exact path match instead of component match.
        if p[2].startswith('^'):
            p[0] = and_(
                or_(func.matches(p[2], hubtty.db.file_table.c.path),
                    func.matches(p[2], hubtty.db.file_table.c.old_path)),
                hubtty.db.file_table.c.status is not None)
        else:
            p[0] = and_(
                or_(hubtty.db.file_table.c.path == p[2],
                    hubtty.db.file_table.c.old_path == p[2]),
                hubtty.db.file_table.c.status is not None)

    def p_status_term(p):
        '''status_term : OP_STATUS string'''
        if p[2] == 'open':
            p[0] = hubtty.db.change_table.c.status.notin_(
                ['MERGED', 'ABANDONED'])
        elif p[2] == 'closed':
            p[0] = hubtty.db.change_table.c.status.in_(['MERGED', 'ABANDONED'])
        else:
            p[0] = hubtty.db.change_table.c.status == p[2].upper()

    def p_limit_term(p):
        '''limit_term : OP_LIMIT NUMBER'''
        # TODO: Implement this.  The sqlalchemy limit call needs to be
        # applied to the query operation and so can not be returned as
        # part of the production here.  The information would need to
        # be returned out-of-band.  In the mean time, since limits are
        # not as important in hubtty, make this a no-op for now so
        # that it does not produce a syntax error.
        p[0] = (True == True)

    def p_op_term(p):
        'op_term : OP'
        # A bare operator with no argument is always a syntax error.
        raise SyntaxError()

    def p_error(p):
        if p:
            raise hubtty.search.SearchSyntaxError(
                'Syntax error at "%s" in search string "%s" (col %s)' %
                (p.lexer.lexdata[p.lexpos:], p.lexer.lexdata, p.lexpos))
        else:
            raise hubtty.search.SearchSyntaxError(
                'Syntax error: EOF in search string')

    return yacc.yacc(debug=0, write_tables=0)
예제 #45
0

def p_match_block(p):
    ''' match_block : MATCH ident BEGIN_BLOCK match_stmt END_BLOCK'''
    # Record the raw production (p.slice) in the global parse-trace list.
    lis.append(p.slice)


def p_match_stmt(p):
    ''' match_stmt : non_block_stmt mat non_block_stmt 
		| LIT_INTEGER mat non_block_stmt'''
    # Record the raw production (p.slice) in the global parse-trace list.
    lis.append(p.slice)


def p_struct_block(p):
    ''' struct_block : STRUCT ident BEGIN_BLOCK struct_stmt END_BLOCK '''
    # Record the raw production (p.slice) in the global parse-trace list.
    lis.append(p.slice)


def p_struct_stmt(p):
    ''' struct_stmt : ident COLON i32 '''
    # Record the raw production (p.slice) in the global parse-trace list.
    lis.append(p.slice)


def p_error(p):
    # PLY error hook: announce the failure and still record the slice.
    # Use a parenthesized single-argument print so this line is valid
    # under both Python 2 and Python 3 (the original bare `print "..."`
    # statement is a SyntaxError on Python 3).
    print("ERROR: error in parsing phase " + str(p))
    # NOTE(review): at EOF, PLY passes p=None, so p.slice would raise
    # AttributeError here — pre-existing behavior, confirm if intended.
    lis.append(p.slice)


# Build the parser, overriding the start symbol to the full compilation unit.
parser = yacc.yacc(start='compilation_unit')
#lis.reverse()
예제 #46
0
 def __init__(self, lexer=None):
     # Build this parser around *lexer* (a project Lexer), creating a
     # default one when none is supplied.  PLY reads the token list and
     # the p_* grammar rules from this instance (module=self); table
     # writing and debug output are disabled for a quiet in-memory build.
     lexer = lexer or Lexer()
     self.tokens = lexer.tokens
     self._lexer = lexer
     self._parser = yacc.yacc(module=self, debug=False, write_tables=0)
예제 #47
0
    try:
        # p[0] = names[p[1]]
        p[0] = p[1]
        # p[0]=IdentiferN(p[1])
    except LookupError:
        # print(f"Undefined name {p[1]!r}")
        p[0] = 0
        raise Exception


def p_error(p):
    """Abort parsing immediately on any syntax error.

    The caller is expected to catch SyntaxError and report it
    (e.g. as "SYNTAX ERROR" in the driver loop).
    """
    raise SyntaxError


# Build the parser at import time; PLY debug output is suppressed.
yacc.yacc(debug=False)
# if len (sys.argv)!=2:
#     print("Invalid Parameters")
# else:
#     filePath=sys.argv[1]
#     fileStream=open(filePath,'r')
#     for line in fileStream:
#         try:
#             rootNode=yacc.parse(line)
#             result=rootNode.eval()
#             if type(result) is str:print("\'"+result+"\'")
#             else:print(result)
#         except SyntaxError:
#             print("SYNTAX ERROR")
#         except Exception:
#             print("SEMANTIC ERROR")
예제 #48
0
def arbol(cedula):
    """Scrape one person's record from civil-registry HTML (Python 2 code).

    Builds a throwaway lex/yacc pair that recognizes the labelled <span>
    fields of the registry page and fills a Persona with the full name
    and the parents' names.

    NOTE(review): the `cedula` argument and most declared tokens are
    unused in the visible code, and neither the parser nor `persona` is
    returned here — confirm how callers drive the parse and read results.
    """

    persona = Persona()
    
    #/////////////////////// LEX ///////////////////////
    # Token names mirror the ASP.NET label ids on the page plus the
    # value-shaped tokens (CEDULA, NOMBRE, SEXO, FECHA) and span markup.
    tokens = ( 
        'ID_CEDULA',
        'ID_NOMBRE_COMPLETO', 
        'ID_SEXO', 
        'ID_CONOCIDO_COMO', 
        'ID_FECHA_DE_NACIMIENTO', 
        'ID_NOMBRE_DEL_PADRE', 
        'ID_NACIONALIDAD', 
        'ID_IDENTIFICACION_DEL_PADRE', 
        'ID_EDAD', 
        'ID_NOMBRE_DE_LA_MADRE', 
        'ID_IDENTIFICACION_DE_LA_MADRE',
        'CEDULA', 'NOMBRE', 'SEXO', 'FECHA', 'SPAN_INICIO', 'SPAN_STYLE', 'SPAN_CIERRE', 
        )
    
    # Literal label ids matched verbatim in the HTML.
    t_ID_CEDULA = 'lblcedula'
    t_ID_NOMBRE_COMPLETO = 'lblnombrecompleto'
    t_ID_SEXO = 'lblsexo'
    t_ID_CONOCIDO_COMO = 'lblconocidocomo'
    t_ID_FECHA_DE_NACIMIENTO = 'lblfechaNacimiento'
    t_ID_NOMBRE_DEL_PADRE = 'lblnombrepadre'
    t_ID_NACIONALIDAD = 'lblnacionalidad'
    t_ID_IDENTIFICACION_DEL_PADRE = 'lblid_padre'
    t_ID_EDAD = 'lbledad'
    t_ID_NOMBRE_DE_LA_MADRE = 'lblnombremadre'
    t_ID_IDENTIFICACION_DE_LA_MADRE = 'lblid_madre'
    
    #t_CEDULA = r'[0-9]{9}'
    def t_CEDULA(t):
        r'[0-9]{9}'
        #print 'cedula ' + t.value
        return t
        
    #t_NOMBRE = r'[A-Z ]+'
    def t_NOMBRE(t):
        r'[A-Z]+ [A-Z]+ [A-Z ]+'
        #print 'nombre ' + t.value
        return t
    
    #t_SEXO = r'MASCULINO|FEMENINO'
    def t_SEXO(t):
        r'MASCULINO|FEMENINO'
        print 'sexo ' + t.value
        return t
    
    #t_FECHA = r'[0-9]{2}\/[0-9]{2}\/[0-9]{4}'
    def t_FECHA(t):
        r'[0-9]{2}\/[0-9]{2}\/[0-9]{4}'
        #print 'fecha ' + t.value
        return t
    
    #t_SPAN_INICIO = r' <span id=\"'
    def t_SPAN_INICIO(t):
        r'<span\sid=\"'
        #print 'span start'
        return t
    
    #t_SPAN_STYLE = r'\" style=\"display:inline\-block;color:Navy;font\-family:Arial;font\-size:Smaller;width:344px;\">'
    def t_SPAN_STYLE(t):
        r'\"\sstyle=\"[a-zA-Z0-9\-;:]+\">'
        #print 'span style '
        return t
    
    #t_SPAN_CIERRE = r'</span>'
    def t_SPAN_CIERRE(t):
        r'</span>'
        #print 'span close'
        return t
    
    def t_error(t):
        # Skip any character that is not part of a recognized field.
        t.lexer.skip(1)
        
    lex.lex()
# //////////////////// END OF LEX ////////////////////

# /////////////////////// YACC ///////////////////////
    # parse rules
    def p_elementos(p):
        'elementos : nombre_completo nombre_del_padre nombre_de_la_madre '
    
    def p_nombre_completo(p):
        'nombre_completo : SPAN_INICIO ID_NOMBRE_COMPLETO SPAN_STYLE NOMBRE SPAN_CIERRE'
        print p[2] + ' : ' + p[4]
        persona.nombre = p[4]

    def p_nombre_del_padre(p):
        'nombre_del_padre : SPAN_INICIO ID_NOMBRE_DEL_PADRE SPAN_STYLE NOMBRE SPAN_CIERRE'
        print p[2] + ' : ' + p[4]
        persona.nombreDelPadre = p[4]
        
    def p_nombre_de_la_madre(p):
        'nombre_de_la_madre : SPAN_INICIO ID_NOMBRE_DE_LA_MADRE SPAN_STYLE NOMBRE SPAN_CIERRE'
        print p[2] + ' : ' + p[4]
        persona.nombreDeLaMadre = p[4]
    
    def p_error(p):
        #print "Syntax error at token", p.type
        # Just discard the token and tell the parser it's okay.
        yacc.errok()
    
    yacc.yacc()
예제 #49
0
파일: parse.py 프로젝트: yisonghan/smop
    assert isinstance(p[4], node.stmt_list)
    p[0] = node.while_stmt(cond_expr=p[2], stmt_list=p[4])


@exceptions
def p_error(p):
    # PLY error hook for the smop grammar.
    if p is None:
        # EOF reached mid-production.
        raise_exception(SyntaxError, "Unexpected EOF", new_lexer)
    if p.type == "COMMENT":
        # print "Discarded comment", p.value
        # Comments are not grammar symbols: drop the token and let the
        # parser resume from its error state.
        parser.errok()
        return
    raise_exception(SyntaxError,
                    ('Unexpected "%s" (parser)' % p.value),
                    new_lexer)
# Module-level parser for the smop grammar; "top" is the start symbol.
parser = yacc.yacc(start="top")


@exceptions
def parse(buf):
    # Parse *buf* with the module-level parser, creating a fresh lexer
    # per call (kept in the module-global new_lexer so error reporting
    # in main.main() can reach it).  With "P" in options.debug, drops
    # into pdb first and dumps each parsed node afterwards.
    # NOTE(review): the parse result p is not returned — confirm whether
    # callers rely on side effects only.
    if "P" in options.debug:
        import pdb
        pdb.set_trace()
    global new_lexer  # used in main.main()
    new_lexer = lexer.new()
    p = parser.parse(
        buf, tracking=1, debug=options.debug_parser, lexer=new_lexer)

    if "P" in options.debug:
        for i, pi in enumerate(p):
            print i, pi.__class__.__name__, pi._backend()
예제 #50
0
    t[0] = AST("constant", [t[1]])


def p_empty(t):
    'empty : '
    # The empty production is represented explicitly as an "empty" AST node.
    t[0] = AST("empty")


def p_error(t):
    # Parser error hook: just announce the failure; no recovery is attempted.
    message = "Whoa. We're  hosed"
    print(message)


# Reading input file
f = open('input.txt', "r")
data = f.read()
f.close()

# Build the grammar
parser = yacc.yacc()
# NOTE(review): this second yacc.yacc() call rebuilds the tables and
# discards the result — presumably leftover; confirm it can be removed.
yacc.yacc(method='LALR', write_tables=True, debug=True)
parser.parse(input=data)

# Render the first parsed AST root, wrapped in braces, to output.txt.
answer = "{"
answer += (ast_roots_lits[0].__repr__())
answer += "}"

f = open("output.txt", "w")
f.write(answer)
f.close()

예제 #51
0
def get_yacc():
    """Build and return a PLY parser from this module's grammar rules."""
    return yacc.yacc()
예제 #52
0
def make_parser():
    """Build and return the PLY parser for the spec language.

    The grammar covers a whole spec file: extern code blocks, type
    declarations, extern functions, state variables, invariants, and
    op/query methods, with a full expression and statement sublanguage.
    Rule docstrings ARE the grammar — do not edit them casually.
    PLY picks up `start`, `precedence`, and the p_* rules from this
    function's locals when yacc.yacc() is called at the end.
    """
    start = "spec"

    def p_spec(p):
        """spec : externcode doccomment WORD OP_COLON typedecls funcdecls states invariants methods externcode"""
        p[0] = syntax.Spec(p[3], p[5], p[6], p[7], p[8], p[9], p[1], p[10],
                           p[2])

    def p_doccomment(p):
        """doccomment :
                      | DOCCOMMENT"""
        # Optional: empty string when absent.
        p[0] = p[1] if len(p) > 1 else ""

    def p_externcode(p):
        """externcode :
                      | EXTERNCODETOKEN"""
        # Optional: empty string when absent.
        p[0] = p[1] if len(p) > 1 else ""

    parsetools.multi(locals(), "typedecls", "typedecl")

    def p_typedecl(p):
        """typedecl : KW_TYPE WORD OP_ASSIGN type
                    | KW_HANDLETYPE WORD OP_ASSIGN type"""
        # handletype wraps the value type in a THandle named after itself.
        if p[1] == "type":
            p[0] = (p[2], p[4])
        elif p[1] == "handletype":
            p[0] = (p[2], syntax.THandle(p[2], p[4]))

    def p_type(p):
        """type : WORD
                | WORD OP_LT type OP_GT
                | OP_OPEN_BRACE typednames OP_CLOSE_BRACE
                | KW_ENUM OP_OPEN_BRACE enum_cases OP_CLOSE_BRACE
                | OP_OPEN_PAREN typelist OP_CLOSE_PAREN
                | KW_NATIVE STRINGLITERAL"""
        # Dispatch on production length, then on the leading token.
        if len(p) == 2:
            p[0] = syntax.TNamed(p[1])
        elif len(p) == 3:
            p[0] = syntax.TNative(p[2])
        elif len(p) == 5:
            if p[1] == "enum":
                p[0] = syntax.TEnum(p[3])
            else:
                p[0] = syntax.TApp(p[1], p[3])
        elif len(p) == 4:
            if p[1] == "{":
                p[0] = syntax.TRecord(p[2])
            elif p[1] == "(":
                p[0] = syntax.TTuple(p[2])

    parsetools.multi(locals(), "enum_cases", "WORD", sep="OP_COMMA")

    def p_typedname(p):
        """typedname : WORD OP_COLON type"""
        p[0] = (p[1], p[3])

    parsetools.multi(locals(), "typednames", "typedname", sep="OP_COMMA")
    parsetools.multi(locals(), "typelist", "type", sep="OP_COMMA")

    def p_func(p):
        """func : KW_EXTERN WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN OP_COLON type OP_ASSIGN STRINGLITERAL"""
        p[0] = syntax.ExternFunc(p[2], p[4], p[7], p[9])

    parsetools.multi(locals(), "funcdecls", "func")

    def p_statevar(p):
        """statevar : KW_STATE WORD OP_COLON type"""
        p[0] = (p[2], p[4])

    parsetools.multi(locals(), "states", "statevar")

    def p_assume(p):
        """assume : KW_ASSUME exp OP_SEMICOLON"""
        p[0] = p[2]

    def p_invariant(p):
        """invariant : KW_INVARIANT exp OP_SEMICOLON"""
        p[0] = p[2]

    parsetools.multi(locals(), "assumes", "assume")
    parsetools.multi(locals(), "invariants", "invariant")

    # Operator precedence table (lowest to highest binding).
    precedence = (("nonassoc", "KW_ELSE",
                   "OP_COLON"), ("left", "OP_SEMICOLON"), ("left", "OP_COMMA"),
                  ("left", "OP_QUESTION"), ("left", "OP_IMPLIES"),
                  ("left", "KW_AND", "KW_OR"),
                  ("left", "OP_EQ", "OP_NE", "OP_LT", "OP_LE", "OP_GT",
                   "OP_GE"), ("left", "OP_PLUS", "OP_MINUS"),
                  ("left", "OP_TIMES"), ("left", "KW_IN"),
                  ("left", "KW_NOT", "KW_DISTINCT", "KW_UNIQUE", "KW_EMPTY",
                   "KW_EXISTS", "KW_THE", "KW_MIN", "KW_MAX", "KW_ARGMIN",
                   "KW_ARGMAX", "KW_SUM", "KW_ANY", "KW_ALL", "KW_LEN",
                   "KW_REVERSED"), ("left", "OP_OPEN_BRACKET"),
                  ("left", "OP_OPEN_PAREN"), ("left", "OP_DOT"),
                  ("left", "KW_OP", "KW_QUERY", "KW_PRIVATE"))

    def p_exp_strlit(p):
        """exp : STRINGLITERAL"""
        p[0] = syntax.EStr(p[1])

    def p_lambda(p):
        """lambda : OP_OPEN_BRACE WORD OP_RIGHT_ARROW exp OP_CLOSE_BRACE"""
        p[0] = syntax.ELambda(syntax.EVar(p[2]), p[4])

    def p_slice(p):
        """slice : exp
                 | exp OP_COLON
                 | OP_COLON exp
                 | exp OP_COLON exp"""
        # A bare exp is an index; otherwise a (start, end) pair where
        # None marks a missing bound.
        if len(p) == 2:
            p[0] = p[1]
        elif len(p) == 3:
            if p[1] == ":":
                p[0] = (None, p[2])
            elif p[2] == ":":
                p[0] = (p[1], None)
        elif len(p) == 4:
            p[0] = (p[1], p[3])

    def p_exp(p):
        """exp : NUM
               | FLOAT
               | WORD
               | WORD OP_OPEN_PAREN exp_list OP_CLOSE_PAREN
               | KW_TRUE
               | KW_FALSE
               | exp OP_PLUS  exp
               | exp OP_MINUS exp
               | exp OP_TIMES exp
               | exp OP_EQ exp
               | exp OP_NE exp
               | exp OP_LT exp
               | exp OP_LE exp
               | exp OP_GT exp
               | exp OP_GE exp
               | exp KW_AND exp
               | exp KW_OR exp
               | exp OP_IMPLIES exp
               | exp OP_QUESTION exp OP_COLON exp
               | exp OP_OPEN_BRACKET slice OP_CLOSE_BRACKET
               | KW_NOT exp
               | OP_MINUS exp
               | exp KW_IN exp
               | KW_UNIQUE exp
               | KW_DISTINCT exp
               | KW_EMPTY exp
               | KW_THE exp
               | KW_MIN exp
               | KW_MAX exp
               | KW_ARGMIN lambda exp
               | KW_ARGMAX lambda exp
               | KW_SUM exp
               | KW_LEN exp
               | KW_ANY exp
               | KW_ALL exp
               | KW_EXISTS exp
               | KW_REVERSED exp
               | exp OP_DOT NUM
               | exp OP_DOT WORD
               | OP_OPEN_PAREN exp_list OP_CLOSE_PAREN
               | OP_OPEN_BRACE record_fields OP_CLOSE_BRACE
               | OP_OPEN_BRACKET exp OP_CLOSE_BRACKET
               | OP_OPEN_BRACKET exp OP_VBAR comprehension_body OP_CLOSE_BRACKET"""
        # The whole expression language in one rule: dispatch first on
        # production length, then on the distinguishing token.
        if len(p) == 2:
            if type(p[1]) is syntax.ENum:
                p[0] = p[1]
            elif p[1] == "true":
                p[0] = syntax.EBool(True)
            elif p[1] == "false":
                p[0] = syntax.EBool(False)
            else:
                p[0] = syntax.EVar(p[1])
        elif len(p) == 3:
            # Unary operators; min/max desugar to argmin/argmax with
            # the identity key function.
            if p[1] == "min":
                p[0] = syntax.EArgMin(
                    p[2], syntax.ELambda(syntax.EVar("x"), syntax.EVar("x")))
            elif p[1] == "max":
                p[0] = syntax.EArgMax(
                    p[2], syntax.ELambda(syntax.EVar("x"), syntax.EVar("x")))
            else:
                p[0] = syntax.EUnaryOp(p[1], p[2])
        elif len(p) == 4:
            # Parenthesized tuples, singleton lists, records, field /
            # tuple access, argmin/argmax, and all binary operators.
            if p[1] == "(":
                exps = p[2]
                if len(exps) == 0:
                    raise Exception("illegal ()")
                elif len(exps) == 1:
                    p[0] = exps[0]
                elif len(exps) > 1:
                    p[0] = syntax.ETuple(tuple(exps))
            elif p[1] == "[":
                p[0] = syntax.ESingleton(p[2])
            elif p[1] == "{":
                p[0] = syntax.EMakeRecord(p[2])
            elif p[2] == ".":
                if isinstance(p[3], syntax.ENum):
                    p[0] = syntax.ETupleGet(p[1], p[3].val)
                else:
                    p[0] = syntax.EGetField(p[1], p[3])
            elif p[1] == "argmin":
                p[0] = syntax.EArgMin(p[3], p[2])
            elif p[1] == "argmax":
                p[0] = syntax.EArgMax(p[3], p[2])
            else:
                p[0] = syntax.EBinOp(p[1], p[2], p[3])
        else:
            # Ternary conditional, indexing/slicing, comprehensions, calls.
            if p[2] == "?":
                p[0] = syntax.ECond(p[1], p[3], p[5])
            elif p[2] == "[":
                if isinstance(p[3], syntax.Exp):
                    p[0] = syntax.EListGet(p[1], p[3])
                elif isinstance(p[3], tuple):
                    start = p[3][0]
                    end = p[3][1]
                    if start is None:
                        start = syntax.ZERO
                    if end is None:
                        end = syntax.ELen(p[1])
                    p[0] = syntax.EListSlice(p[1], start, end)
            elif p[1] == "[":
                p[0] = syntax.EListComprehension(p[2], p[4])
            elif p[2] == "(":
                p[0] = syntax.ECall(p[1], p[3])
            else:
                assert False, "unknown case: {}".format(repr(p[1:]))

    parsetools.multi(locals(), "exp_list", "exp", sep="OP_COMMA")

    def p_record_field(p):
        """record_field : WORD OP_COLON exp"""
        p[0] = (p[1], p[3])

    parsetools.multi(locals(), "record_fields", "record_field", sep="OP_COMMA")

    def p_comprehension_clause(p):
        """comprehension_clause : WORD OP_LEFT_ARROW exp
                                | exp"""
        # "x <- e" pulls elements; a bare exp is a filter condition.
        if len(p) == 2:
            p[0] = syntax.CCond(p[1])
        else:
            p[0] = syntax.CPull(p[1], p[3])

    parsetools.multi(locals(),
                     "comprehension_body",
                     "comprehension_clause",
                     sep="OP_COMMA")

    def p_accesschain(p):
        """accesschain : WORD
                       | accesschain OP_DOT WORD"""
        # Left-nested field accesses rooted at a variable.
        if len(p) > 2:
            p[0] = syntax.EGetField(p[1], p[3])
        else:
            p[0] = syntax.EVar(p[1])

    def p_visibility(p):
        """visibility :
                      | KW_PRIVATE"""
        if len(p) > 1:
            p[0] = syntax.Visibility.Private
        else:
            p[0] = syntax.Visibility.Public

    def p_method(p):
        """method : doccomment            KW_OP    WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN assumes stm
                  | doccomment visibility KW_QUERY WORD OP_OPEN_PAREN typednames OP_CLOSE_PAREN assumes exp"""
        if p[2] == "op":
            p[0] = syntax.Op(p[3], p[5], p[7], p[8], p[1])
        else:
            p[0] = syntax.Query(p[4], p[2], p[6], p[8], p[9], p[1])

    parsetools.multi(locals(), "methods", "method")

    def p_maybeelse(p):
        """maybeelse :
                     | KW_ELSE block"""
        # Missing else-branch becomes an explicit no-op statement.
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = syntax.SNoOp()

    def p_block(p):
        """block : OP_OPEN_BRACE stm OP_CLOSE_BRACE"""
        p[0] = p[2]

    def p_basicstm(p):
        """basicstm : accesschain OP_OPEN_PAREN exp_list OP_CLOSE_PAREN OP_SEMICOLON
                    | accesschain OP_ASSIGN exp OP_SEMICOLON
                    | KW_IF exp block maybeelse
                    | KW_LET WORD OP_ASSIGN exp OP_SEMICOLON"""
        if p[1] == "if":
            p[0] = syntax.SIf(p[2], p[3], p[4])
        elif p[1] == "let":
            p[0] = syntax.SDecl(p[2], p[4])
        elif p[2] == "(":
            # Calls must target a field of some object, i.e. x.m(...).
            if not isinstance(p[1], syntax.EGetField):
                report_parse_error(
                    p[1],
                    "Method calls must have the form `target.method(...)`")
            p[0] = syntax.SCall(p[1].e, p[1].f, p[3])
        else:
            p[0] = syntax.SAssign(p[1], p[3])

    def p_stm(p):
        """stm :
               | basicstm stm"""
        # Right-fold statements into SSeq, collapsing trailing no-ops.
        if len(p) > 1:
            if isinstance(p[2], syntax.SNoOp):
                p[0] = p[1]
            else:
                p[0] = syntax.SSeq(p[1], p[2])
        else:
            p[0] = syntax.SNoOp()

    def p_empty(p):
        'empty :'
        pass

    def p_error(p):
        if p is None:
            raise Exception("Unexpected end-of-file")
        raise Exception("Syntax error on line {} at {}".format(p.lineno, p))

    return yacc.yacc()
예제 #53
0
 def __init__(self):
     # NOTE(review): debugging print left in the constructor —
     # presumably temporary; confirm before shipping.
     print("Parser called")
     # PLY collects the grammar rules and token list from this instance.
     self.parser = yacc.yacc(module=self)
예제 #54
0
파일: metaL.py 프로젝트: ponyatov/OGP
def p_ex_vector(p):
    ' ex : lq vector rq '
    # Unwrap the brackets: the expression is just the inner vector node.
    vector_node = p[2]
    p[0] = vector_node


def p_vector(p):
    ' vector : '
    # Empty production: every vector starts out as a fresh, unnamed Vector.
    node = Vector('')
    p[0] = node


def p_error(p):
    # Any parse failure is fatal: surface the offending token (or None at
    # end-of-input) wrapped in a SyntaxError.
    raise SyntaxError(p)


# Build the module-level parser once at import time; table writing and debug
# output are disabled so importing leaves no files on disk.
parser = yacc.yacc(debug=False, write_tables=False)

#################################################################### system init

# Bootstrap: parse the sibling .ini file (same basename as this module).
with open(__file__[:-3] + '.ini') as ini:
    parser.parse(ini.read())

try:
    import uwsgi
    # https://uwsgi-docs.readthedocs.io/en/latest/PythonModule.html
    web = vm['WEB']

    def uwsgi_stop(ctx):
        # Command callback: shut the uWSGI server down.
        uwsgi.stop()

    vm['BYE'] = Command(uwsgi_stop)
예제 #55
0
 def __init__(self):
     """Create the lexer and build the PLY parser bound to this instance."""
     self._lexer = Lexer()
     self._parser = yacc.yacc(module=self)
예제 #56
0
 def __init__(self):
     """Initialise the base class, build the parser, and reset point storage."""
     super().__init__()
     self.yacc = yacc.yacc(module=self)
     # mapping of named points, filled in during parsing
     self.point = {}
예제 #57
0
파일: gen-wasm.py 프로젝트: icefoxen/wabt
    'data : data STRING'
    p[0] = p[1]
    WriteString(p[0], p[2])


def p_data_empty(p):
    'data :'
    # Empty production: start with a fresh, empty byte list.
    empty_payload = []
    p[0] = empty_payload


def p_error(p):
    """Abort parsing by raising Error with a location-tagged message.

    Fix: yacc calls this with ``p is None`` when input ends unexpectedly;
    the previous code dereferenced ``p.lineno`` unconditionally and crashed
    with AttributeError instead of reporting the syntax error.
    """
    if p is None:
        raise Error('syntax error, unexpected end of input')
    raise Error('%d: syntax error, %s' % (p.lineno, p))


# Build the module-level parser; the generated table module and debug file
# are written under stable names into OUT_DIR so rebuilds are cached there.
parser = yacc.yacc(tabmodule='gen_wasm',
                   debugfile='gen_wasm_debug.txt',
                   outputdir=OUT_DIR)

################################################################################


def Run(input_file_name):
    """Parse the named source file and return its output as a bytearray."""
    with open(input_file_name) as source_file:
        source_text = source_file.read()
    parsed = parser.parse(source_text)
    # convert the parsed byte list to a mutable bytes buffer
    return bytearray(parsed)


def main(args):
def Hoi4Yaccer():
    """Build and return a PLY parser for Paradox/HOI4 script files.

    Calling Hoi4Lexer() first registers the token definitions the grammar
    below relies on. Returns the parser built by ``yacc.yacc()``.

    Fix: the nested ``p_error`` previously dereferenced ``p.value``
    unconditionally and crashed with AttributeError when the parser hit an
    unexpected end of input (``p is None``); that case is now guarded.
    """

    Hoi4Lexer()

    # match for stuff = <SOMETHING>
    def p_allocation(p):
        '''allocation : STRING EQUAL STRING
                      | STRING EQUAL list
                      | STRING EQUAL dict
                      | STRING EQUAL NUMBER
                      | STRING GT NUMBER
                      | STRING LT NUMBER
                      | STRING EQUAL QUOTED_STRING
        '''
        if p[2] == '=':
            p[0] = {p[1]: p[3]}
        else:
            # paradox format allows for inequalities, which doesn't map cleanly into json
            # but these are pretty rare (seen only for radar and sonar slots count on ships)
            # so we put them in a special format
            p[0] = {p[1]: {'operation': p[2], 'value': p[3]}, 'META': 'INEQ'}

    # match for { stuff1 stuff2 stuff3 }
    def p_list(p):
        '''list : LBRACKET string_items RBRACKET
        '''
        p[0] = p[2]

    # match for { }
    def p_empty_list(p):
        "list : LBRACKET RBRACKET"
        p[0] = []

    # match for 'stuff1 stuff2 stuff3' (content of { stuff1 stuff2 stuff3 })
    def p_elements(p):
        '''string_items : STRING string_items
                        | QUOTED_STRING string_items
        '''
        p[2].append(p[1])
        p[0] = p[2]

    # termination for previous
    def p_element_single(p):
        '''string_items : STRING
                        | QUOTED_STRING
        '''
        p[0] = [p[1]]

    # match for:
    # {
    #    stuff1 = <SOMETHING>
    #    stuff2 = <SOMETHING_ELSE>
    # }
    def p_dict(p):
        '''dict : LBRACKET allocation_items RBRACKET'''
        p[0] = p[2]

    # match for content of previous:
    #    stuff1 = <SOMETHING>
    #    stuff2 = <SOMETHING_ELSE>
    def p_allocation_items(p):
        '''allocation_items : allocation allocation_items'''
        p[0] = merge_two_dicts(p[1], p[2])

    # termination of previous
    def p_allocation_single(p):
        '''allocation_items : allocation'''
        p[0] = p[1]

    def p_error(p):
        # p is None when the input ends unexpectedly; guard it so we report
        # the error instead of crashing on p.value.
        if p is None:
            print("Syntax error at end of input")
            return
        print("Syntax error at '%s'" % p.value)

    return yacc.yacc()
예제 #59
0
파일: main.py 프로젝트: SYVTT/compilers-lab
import sys
import ply.yacc as yacc
from Cparser import Cparser
from TreePrinter import TreePrinter

if __name__ == '__main__':

    # Source file comes from argv[1], falling back to the bundled example.
    filename = sys.argv[1] if len(sys.argv) > 1 else "example.txt"
    try:
        file = open(filename, "r")
    except IOError:
        print("Cannot open {0} file".format(filename))
        sys.exit(0)

    # Build the parser around a Cparser instance (renamed so the instance no
    # longer shadows the class) and parse the whole input with its scanner.
    cparser = Cparser()
    parser = yacc.yacc(module=cparser)
    text = file.read()
    ast = parser.parse(text, lexer=cparser.scanner)
예제 #60
0
def p_expression_fwsp(p):
    'fwsp : FWSP'
    # Unfold folding whitespace: strip every CRLF pair from the token text.
    token_text = p[1]
    p[0] = token_text.replace('\r\n', '')


def p_error(p):
    # Every parse failure becomes a SyntaxError: a bare end-of-input when
    # yacc hands us no token, otherwise a message locating the bad token.
    if not p:
        raise SyntaxError('syntax error: eof')
    raise SyntaxError('syntax error: token=%s, lexpos=%s' %
                      (p.value, p.lexpos))


# Build the parsers

# All five parsers share the same grammar rules; they differ only in the
# start symbol, giving one dedicated entry point per top-level construct.

log.info('building mailbox parser')
mailbox_parser = yacc.yacc(start='mailbox', errorlog=log)

log.info('building addr_spec parser')
addr_spec_parser = yacc.yacc(start='addr_spec', errorlog=log)

log.info('building url parser')
url_parser = yacc.yacc(start='url', errorlog=log)

log.info('building mailbox_or_url parser')
mailbox_or_url_parser = yacc.yacc(start='mailbox_or_url', errorlog=log)

log.info('building mailbox_or_url_list parser')
mailbox_or_url_list_parser = yacc.yacc(start='mailbox_or_url_list',
                                       errorlog=log)

# Interactive prompt for easy debugging