Example no. 1
    def include_end(self):
        ''' Performs an end of include.
        '''
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if self.filestack == []:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()  # Creates the token
        result.value = self.put_current_line()
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result
Example no. 2
    def include_end(self):
        """ Performs and end of include.
        """
        self.lex = self.filestack[-1][2]
        self.input_data = self.filestack[-1][3]
        self.filestack.pop()

        if not self.filestack:  # End of input?
            return

        self.filestack[-1][1] += 1  # Increment line counter of previous file

        result = lex.LexToken()
        result.value = self.put_current_line(suffix='\n')
        result.type = '_ENDFILE_'
        result.lineno = self.lex.lineno
        result.lexpos = self.lex.lexpos

        return result
Example no. 3
    def trace(lexer, *argv):
        token = lexer.token_()
        if not quiet_mode:
            sys.stderr.write("TOKEN ")
            sys.stderr.write(repr(token))
            sys.stderr.write("\n")
            sys.stderr.flush()

        if token is not None:
            return token

        if len(lexer.indentation_stack) == 1:
            return 

        lexer.indentation_stack.pop()
        token = lex.LexToken()
        token.value = ''
        token.type = 'DEDENT'
        token.lineno = lexer.lineno
        token.lexpos = lexer.lexpos
        token.lexer = lexer
        return token
Example no. 4
def t_INCLUDE(t):
    r'\#[ \t]*include[ \t]+\"(.|(\\[ntr0\'"])|(\\x[0-9a-fA-F][0-9a-fA-F]))*?\"'
    include_file = re.split('[ \t]+', t.value)[-1].strip('\"')
    global last_newline
    if t.lexpos != last_newline:
        print('Error: "#include" is not at the beginning of the line!')
        pass
    elif include_file in included.keys():
        print('Warning: File "%s" already included!' % include_file)
        pass
    elif not os.path.isfile(include_file):
        print('Error: No such file or directory: "%s"!' % include_file)
        sys.exit()
    elif not os.access(include_file, os.R_OK):
        print('Error: Can\'t open file or directory: "%s"!' % include_file)
        sys.exit()
    else:
        token_list.append(t)
        included[include_file] = '1'
        include_lexer = lex.lex()
        include_fin = open(include_file, 'r')
        include_data = include_fin.read()
        include_fin.close()
        last_newline = 0
        include_lexer.input(include_data)
        while True:
            toke = include_lexer.token()
            if not toke:
                break  # No more input
            token_list.append(toke)
        t.value = include_file
        kalimera = lex.LexToken()
        kalimera.type = 'END_INCLUDE'
        kalimera.value = t.value
        kalimera.lineno = 0
        kalimera.lexpos = 0
        token_list.append(kalimera)
Example no. 5
File: lexer.py Project: rwl/enaml
    def create_py_blocks(self, token_stream):
        for tok in token_stream:
            if not tok.type == 'PY_BLOCK_START':
                yield tok
                continue

            # yield the start token since it's needed by the parser
            start_tok = tok
            yield start_tok

            # The next token must be a newline or it's a syntax error
            try:
                nl_tok = next(token_stream)
            except StopIteration:
                nl_tok = None

            if nl_tok is None or nl_tok.type != 'NEWLINE':
                if nl_tok is None:
                    # create a fake token with a line number
                    # for the error handler.
                    nl_tok = lex.LexToken()
                    nl_tok.lineno = start_tok.lineno
                msg = 'Newline required after a ":: python ::" tag'
                raise_syntax_error(msg, nl_tok)

            # yield the newline token since it's needed by the parser
            yield nl_tok

            # Collect the Python code from the block
            py_toks = []
            end_tok = None
            for tok in token_stream:
                if tok.type == 'PY_BLOCK_END':
                    end_tok = tok
                    break
                elif tok.type == 'NEWLINE':
                    py_toks.append(tok)
                else:
                    assert tok.type == 'PY_BLOCK_CONTINUE', tok.type
                    py_toks.append(tok)

            if end_tok is None:
                # Reached end of input without an :: end :: delimiter
                msg = 'EOF while scanning raw python block'
                raise_syntax_error(msg, start_tok)

            # Create the python text to add to the py block token
            # creating blank lines as necessary so that syntax errors
            # get reported with correct line numbers. The captured
            # text gets handed directly to Python's compile function.
            leader = '\n' * start_tok.lineno
            py_txt = leader + ''.join(tok.value for tok in py_toks)

            # create a python token
            py_block = lex.LexToken()
            py_block.lineno = start_tok.lineno + 1
            py_block.lexpos = -1
            py_block.value = py_txt
            py_block.type = 'PY_BLOCK'

            # Yield the py block to the parser
            yield py_block

            # Yield the end block to the parser
            yield end_tok

            # An end token must be followed by a newline
            try:
                nl_tok = next(token_stream)
            except StopIteration:
                nl_tok = None

            if nl_tok is None or nl_tok.type != 'NEWLINE':
                if nl_tok is None:
                    # create a fake token with a line number
                    # for the error handler.
                    nl_tok = lex.LexToken()
                    nl_tok.lineno = end_tok.lineno
                msg = 'Newline required after a ":: end ::" tag'
                raise_syntax_error(msg, nl_tok)

            # The parser requires the newline token
            yield nl_tok
Example no. 6
def makeToken(value, type):
    """Helper function to quickly make a token"""
    tok = lex.LexToken()
    tok.value = value
    tok.type = type
    return tok
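A token assembled by hand like this usually also needs lineno and lexpos set before it is handed to a PLY parser, since PLY reads those attributes for position tracking and error reporting. Below is a minimal sketch of such a helper, assuming stock ply.lex; the name make_token and its zero defaults are illustrative, not taken from the projects above.

import ply.lex as lex

def make_token(token_type, value, lineno=0, lexpos=0):
    """Build a standalone LexToken carrying the four attributes PLY expects."""
    tok = lex.LexToken()
    tok.type = token_type
    tok.value = value
    tok.lineno = lineno  # illustrative default; pass the real source line when known
    tok.lexpos = lexpos  # illustrative default; pass the real input offset when known
    return tok

# Usage: synthesize an end-of-file marker token, as several examples above do.
eof = make_token('EOF', '', lineno=1, lexpos=0)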
Example no. 7
 def p_def_func(p):
     'def_func : id opar arg_list cpar colon type ocur expr ccur semi'
     p[0] = FuncDeclarationNode(p[1], p[3], p[6], p[8])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 8
def _new_token(type, lineno):
    tok = lex.LexToken()
    tok.type = type
    tok.value = None
    tok.lineno = lineno
    return tok
Example no. 9
 def p_program(p):
     'program : class_list'
     p[0] = ProgramNode(p[1])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 10
def t_comment(dummy_t):
    r'\#.*'
    pass


def t_error(t):
    ''' Prints error messages when scan fails '''
    print("Illegal character at line {} '{}'".format(t.lexer.lineno, \
        t.value[0]))
    t.lexer.skip(1)


import ply.lex as lex
lexer = lex.lex(debug=0)  # Build the lexer

EOF = lex.LexToken()
EOF.type = 'EOF'


###############################################################################
# Classes that define nodes in a tree of nodes constructed by the parser
###############################################################################
class NumNode:
    def __init__(self, num):
        self.num = num

    def run(self):
        return self.num

    def generate(self):
        return str(self.num)
Example no. 11
 def p_expr_while(p):
     'expr : while expr loop expr pool'
     p[0] = WhileNode(p[2], p[4])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 12
 def p_expr_instantiate(p):
     'expr : new type'
     p[0] = InstantiateNode(p[2])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 13
 def p_expr_isvoid(p):
     'expr : isvoid expr'
     p[0] = IsVoidNode(p[2])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 14
 def p_expr_case(p):
     'expr : case expr of case_list esac'
     p[0] = SwitchCaseNode(p[2], p[4])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 15
 def p_expr_let(p):
     'expr : let decl_list in expr'
     p[0] = LetInNode(p[2], p[4])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 16
 def p_expr_chunk(p):
     'expr : ocur chunk ccur'
     p[0] = ChunkNode(p[2])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 17
 def p_expr_assign(p):
     'expr : id assignArrow expr'
     p[0] = AssignNode(p[1], p[3])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 18
 def get_dedent(self):
     return lex.LexToken('DEDENT', 1, self.lexer.lineno)  # important
Example no. 19
 def p_expr_id(p):
     'expr : id'
     p[0] = VariableNode(p[1])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 20
 def p_expr_if(p):
     'expr : if expr then expr else expr fi'
     p[0] = ConditionalNode(p[2], p[4], p[6])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 21
 def get_newline(self):
     return lex.LexToken('NEWLINE', 1, self.lexer.lineno)
Example no. 22
 def p_expr_int(p):
     'expr : number'
     p[0] = ConstantNumNode(p[1])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]
Example no. 23
def _MakeLexToken(token_type, value, lineno=1, lexpos=0):
  """Makes a LexToken with the given parameters. (Note that lineno is 1-based,
  but lexpos is 0-based.)"""
  rv = lex.LexToken()
  rv.type, rv.value, rv.lineno, rv.lexpos = token_type, value, lineno, lexpos
  return rv
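For reference, a call like the following would build a token equivalent to one emitted by a real lexer; the token type 'NAME' and the value 'foo' are made-up illustrations, not tied to any grammar above.

tok = _MakeLexToken('NAME', 'foo', lineno=3, lexpos=42)
assert (tok.type, tok.value, tok.lineno, tok.lexpos) == ('NAME', 'foo', 3, 42)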
Example no. 24
 def p_expr_string(p):
     'expr : string'
     p[0] = StringNode(p[1])
     p[0].token_list = [
         sl for sl in p.slice if type(lt.LexToken()) == type(sl)
     ]