示例#1
0
    def test_select_simple_lexer_if_ply_not_present(self, rules):
        """Lexer construction falls back to the 'simple' backend when PLY is unavailable."""

        def broken_import(name):
            # Simulate an environment where PLY cannot be imported.
            raise ImportError

        with patch('ox.lexer._import', broken_import):
            assert ox.make_lexer(rules).which == 'simple'
def make_lexer():
    """Return an ox lexer for a small calculator-style language."""
    token_rules = [
        ('NUMBER', r'[+-]?\d+(\.\d+)?'),      # integers/decimals, optional sign
        ('SYMBOL', r'[a-zA-Z][_a-zA-Z0-9]*'),  # identifiers
        ('OP_SUM', r'[+-]'),
        ('OP_MUL', r'[*/]'),
        ('CONTROL', r'[(),=;%]'),
    ]
    return ox.make_lexer(token_rules)
示例#3
0
def lexer(string):
    """Tokenize *string* into ANSWER, OPTION and ANY (raw line) tokens."""
    tokenize = ox.make_lexer([
        ('ANSWER', r'ANSWER:\s*[a-zA-Z]'),
        ('OPTION', r'[a-zA-Z][.)]\s'),
        ('ANY', r'.*\n'),
    ])
    return tokenize(string)
示例#4
0
def create_lexer():
    """Build and return the lexer for the bracketed config format."""
    # Rules are matched in declaration order; comments are dropped by the
    # ignore_ prefix convention.
    return ox.make_lexer([
        ('SECTION_TITLE', r'\[[^\]]+\]\n*'),
        ('SUBSECTION_TITLE', r'\[\[[^\]]+\]\]\n*'),
        ('DATA', r'\{[^\}]+\}\n*'),
        ('ignore_COMMENT', r'\#[^\n]+\n*'),
        ('STRING', r'[^=^#^\n^\[^\]^\}^\{]+\n*'),
        ('EQUAL', r'='),
    ])
示例#5
0
def make_lexer():
    """Return an ox lexer for a small lisp-like language."""
    rules = [
        ('CONTROL', r'[()]'),
        ('NUMBER', r'[-+]?\d+(\.\d+)?'),
        ('SYMBOL', r'[-+*/!~@#$%&=?<>.,^]+'),
        ('NAME', r'[a-z]+'),
        ('STRING', r'"..."'),
        ('_COMMENT', r';...'),

        # Reserved words
        ('r_IF', 'if'),
        ('r_LET', 'let'),
    ]
    return ox.make_lexer(rules)
示例#6
0
File: gift.py  Project: fabiommendes/gift
    def lexer(self, string):
        """Tokenize a GIFT-format question source into its token stream."""
        tokenize = ox.make_lexer([
            ('ANSWER', r'=.+?(?=[~\#])'),
            ('OPTION', r'~.+?(?=[~}\#])'),
            ('FEEDBACK', r'\#.+?(?=[~}])'),
            ('QUESTION', r'.+?(?={)'),
            ('TRUE_VALUE', r'T|TRUE'),
            ('FALSE_VALUE', r'F|FALSE'),
            ('LBRACKET', r'{'),
            ('RBRACKET', r'}'),
        ])
        return tokenize(string)
示例#7
0
def make_lexer():
    """Create a JSON lexer; return the pair (lexer, token_names)."""
    rules = [
        ('STRING', r'"[^"\n]*(\\"[^"\n]*)*"'),
        ('NUMBER', r'[+-]?\d+(\.\d+)?'),
        ('KEYWORD', r'true|false|null'),
        ('LBRACE', r'\{'),
        ('RBRACE', r'\}'),
        ('LBRACK', r'\['),
        ('RBRACK', r'\]'),
        ('COLON', r'\:'),
        ('COMMA', r'\,'),
    ]
    token_names = [name for name, _pattern in rules]
    return ox.make_lexer(rules, which='simple'), token_names
示例#8
0
from operator import add, mul, sub, truediv as div
import ox

# Lexer for the postfix calculator.
# Fixes: the rules must be an ordered *list* -- the original used a set
# literal, which loses rule priority ordering -- and DIV's pattern was a
# copy-paste of MUL's (r'\*'); it must match '/'.
lexer = ox.make_lexer([
    ('MUL', r'\*'),
    ('ADD', r'\+'),
    ('SUB', r'\-'),
    ('DIV', r'/'),
    ('NUMBER', r'\d+'),
])

def make_ast(src):
    """
    Convert source code to a list of numbers and operations.

    >>> make_ast('1 2 +')
    [1.0, 2.0, add]
    """
    # Tokenize first; mapping tokens to values/functions is still a stub.
    tokens = lexer(src)
    return ...

def evaluate(src):
    """Evaluate a postfix (reverse Polish) notation expression.

    >>> evaluate('40 2 1 *+')
    42.0
    """
    tree = make_ast(src)
    ...  # TODO: fold the AST; placeholder result for now
    return 0.0
示例#9
0
 def ply_lexer(self, rules):
     """Build a lexer explicitly backed by the PLY implementation."""
     return ox.make_lexer(rules, which='ply')
import ox
import click
import pprint

# Lexer for the lisp-flavoured brainfuck language.  Keyword rules come
# first; 'FUCTION' (sic -- the misspelling is shared with tokens_list
# below, so it must not be renamed here alone) matches any identifier
# that is not a keyword or a number.
lexer = ox.make_lexer([
    ('LOOP', r'loop'),
    ('DEC', r'dec'),
    ('INC', r'inc'),
    ('LEFT_PAR', r'\('),
    ('RIGHT_PAR', r'\)'),
    ('RIGHT', r'right'),
    ('LEFT', r'left'),
    ('PRINT', r'print'),
    ('READ', r'read'),
    ('DO', r'do'),
    ('DO_AFTER', r'do-after'),
    ('DO_BEFORE', r'do-before'),
    ('ADD', r'add'),
    ('SUB', r'sub'),
    ('NUMBER', r'\d+'),
    ('DEF', r'def'),
    ('FUCTION',
     r'(?!loop)(?!dec)(?!inc)(?!right)(?!left)(?!print)(?!read)(?!do)(?!do)(?!do)(?!add)(?!sub)(?!def)(?!\d)[a-zA-Z0-9-\+_]+'),
    ('ignore_COMMENT', r';[ \S]*'),
    ('ignore_SPACE', r'\s+'),
])

# Token names exposed to the parser (the ignore_ rules are excluded).
tokens_list = [
    'DEC', 'INC', 'LOOP', 'LEFT_PAR', 'RIGHT_PAR', 'RIGHT', 'LEFT', 'PRINT',
    'READ', 'DO', 'DO_AFTER', 'DO_BEFORE', 'ADD', 'SUB', 'NUMBER', 'DEF',
    'FUCTION',
]

parser = ox.make_parser([('expr : LEFT_PAR RIGHT_PAR', lambda x, y: '()'),
                         ('expr : LEFT_PAR term RIGHT_PAR', lambda x, y, z: y),
                         ('term : atom term', lambda x, y: (x, ) + y),
                         ('term : atom', lambda x: (x, )),
                         ('atom : expr', lambda x: x),
                         ('atom : DEC', lambda x: x),
                         ('atom : INC', lambda x: x),
                         ('atom : LOOP', lambda x: x),
                         ('atom : RIGHT', lambda x: x),
                         ('atom : LEFT', lambda x: x),
示例#11
0
import ox
import click
import pprint

# Lexer: parentheses, words and numbers; whitespace and ';' comments are
# skipped via the ignore_ prefix.
lexer = ox.make_lexer([
    ('PARANTHESIS_OPENED', r'\('),
    ('PARANTHESIS_CLOSED', r'\)'),
    ('PLAIN_TEXT', r'[-a-zA-Z]+'),
    ('NUMBERS', r'\d+'),
    ('ignore_NEWLINE', r'\s+'),
    ('ignore_COMMENT', r';[^\n]*'),
])

tokens = [
    'PARANTHESIS_OPENED',
    'PARANTHESIS_CLOSED',
    'PLAIN_TEXT',
    'NUMBERS',
    'ignore_NEWLINE',
    'ignore_COMMENT',
]

# Grammar: a block is a (possibly empty) parenthesised expression; an
# expression is one or more atoms; blocks nest as atoms.
parser = ox.make_parser([
    ('atom : PLAIN_TEXT', lambda leaf: leaf),
    ('atom : NUMBERS', lambda leaf: leaf),
    ('expr : atom expr', lambda head, tail: (head, ) + tail),
    ('expr : atom', lambda head: (head, )),
    ('block : PARANTHESIS_OPENED PARANTHESIS_CLOSED', lambda lp, rp: '()'),
    ('block : PARANTHESIS_OPENED expr PARANTHESIS_CLOSED',
     lambda lp, body, rp: body),
    ('atom : block', lambda blk: blk),
], tokens)
from operator import add, mul, sub, truediv as div
import ox

# Lexer for the postfix calculator.
# Fix: ADD, SUB and DIV were all copy-pasted with MUL's pattern (r'\*'),
# so '+', '-' and '/' could never be tokenized.
lexer = ox.make_lexer([
    ('MUL', r'\*'),
    ('ADD', r'\+'),
    ('SUB', r'\-'),
    ('DIV', r'/'),
    ('NUMBER', r'\d+'),
])

def make_ast(src):
    """Convert source code into a list of numbers and operations.

    >>> make_ast('1 2 +')
    [1.0, 2.0, add]
    """
    token_stream = lexer(src)
    # TODO: map tokens to floats/operator functions; stub for now.
    return ...

def evaluate(src):
    """Evaluate the result of a postfix-notation expression.

    >>> evaluate('40 2 1 * +')
    42.0
    """
    tree = make_ast(src)
    ...  # TODO: reduce the AST; placeholder result for now
    return 0.0
示例#13
0
import ox
import operator as op

# Operator symbol -> binary arithmetic function used by the parser actions.
OPERATORS = {
    '+': op.add,
    '-': op.sub,
    '*': op.mul,
    '/': op.truediv,
}

# Calculator lexer: numbers (integer or decimal), additive and
# multiplicative operator classes (split so the grammar encodes
# precedence), and parentheses.
lexer = ox.make_lexer([
    ('NUMBER', r'[0-9]+(\.[0-9]+)?'),
    ('OP_SUM', r'[+-]'),
    ('OP_MUL', r'[*/]'),
    ('LPAR', r'\('),
    ('RPAR', r'\)'),
])

def identity(x):
    # Pass-through semantic action for unit grammar rules.
    return x


def compute_operation(x, op, y):
    """Apply the binary operator named *op* to operands *x* and *y*."""
    return OPERATORS[op](x, y)


# Grammar with usual precedence: OP_MUL binds tighter than OP_SUM and
# parenthesised expressions restart at the top rule.
parser = ox.make_parser([
    ('expr : term OP_SUM expr', compute_operation),
    ('expr : term', identity),
    ('term : atom OP_MUL term', compute_operation),
    ('term : atom', identity),
    ('atom : NUMBER', float),
    ('atom : LPAR expr RPAR', lambda lp, inner, rp: inner),
], ['NUMBER', 'OP_SUM', 'OP_MUL', 'LPAR', 'RPAR'])

while True:
    from pprint import pprint
import ox
import click

from interpreter import Interpreter

# Lexer for the lisp-like language.
# NOTE(review): COMMENT, NEWLINE and SPACE lack the ignore_ prefix and are
# also absent from `tokens` -- confirm how ox filters them before parsing.
lexer = ox.make_lexer([
    ('NAME', r'[-a-zA-Z]+'),
    ('NUMBER', r'\d+'),
    ('PARENTESE_A', r'\('),
    ('PARENTESE_F', r'\)'),
    ('COMMENT', r';.*'),
    ('NEWLINE', r'\n'),
    ('SPACE', r'\s+'),
])

tokens = ['PARENTESE_F', 'PARENTESE_A', 'NUMBER', 'NAME']

# Semantic actions: names/operators pass through, numbers become ints.
name = lambda value: value
number = lambda value: int(value)
op = lambda value: value

# A program is a parenthesised (possibly empty) sequence of operators,
# where nested programs also count as operators.
parser = ox.make_parser([
    ('program : PARENTESE_A expr PARENTESE_F', lambda lp, body, rp: body),
    ('program : PARENTESE_A PARENTESE_F', lambda lp, rp: '()'),
    ('expr : operator expr', lambda head, tail: (head, ) + tail),
    ('expr : operator', lambda head: (head, )),
    ('operator : program', op),
    ('operator : NAME', name),
    ('operator : NUMBER', number),
], tokens)


@click.command()
@click.argument('source', type=click.File('r'))
def make_tree(source):
    program = source.read()
示例#15
0
 def test_issues_an_error_with_invalid_lexer_name(self, rules):
     """make_lexer must raise ValueError for an unknown `which` backend."""
     with pytest.raises(ValueError):
         ox.make_lexer(rules, which='foobar')
import ox
import click
import re
from getch import getche

# Lexer rules.
# Fix: a COMMA rule was missing even though 'COMMA' is declared in
# tokens_list and consumed by the grammar below ('simple_term : atom COMMA
# simple_term'), so commas in the input could never be tokenized.
lexer = ox.make_lexer([
    ('NAME', r'[a-zA-Z]+'),
    ('NUMBER', r'\d+'),
    ('OPENING_PARENTHESES', r'\('),
    ('CLOSING_PARENTHESES', r'\)'),
    ('COMMA', r'\,'),
])

tokens_list = [
    'NUMBER',
    'NAME',
    'OPENING_PARENTHESES',
    'CLOSING_PARENTHESES',
    'COMMA',
]

# A NUMBER token becomes a tagged float atom.
atom_number = lambda value: ('atom_number', float(value))

# Grammar: blocks are sequences of terms; terms and atoms may be wrapped
# in parentheses and separated by commas.
parser = ox.make_parser([
    ('simple_block : simple_block simple_term',
     lambda first, second: (first, second)),
    ('simple_block : simple_term', lambda block: block),
    ('simple_term : OPENING_PARENTHESES simple_term CLOSING_PARENTHESES',
     lambda lp, term, rp: (lp, term, rp)),
    ('simple_term : atom simple_term',
     lambda first, second: (first, second)),
    ('simple_term : atom COMMA simple_term',
     lambda atom, comma, term: (atom, comma, term)),
    ('simple_term : atom', lambda term: term),
    ('atom : OPENING_PARENTHESES atom CLOSING_PARENTHESES',
     lambda lp, term, rp: (lp, term, rp)),
    ('atom : NUMBER', atom_number),
    ('atom : NAME', lambda name: name),
], tokens_list)

def pretty_print(code_p):
    indent = 0
示例#17
0
    ('DIGITS', r'\d+'),
    ('ignore_NEWLINE', r'\s+'),
    ('ignore_COMMENT', r';[^\n]*'),
]

# Grammar: parenthesised blocks containing sequences of TEXT/DIGITS atoms;
# blocks nest by reducing to atoms.
parser_rules = [
    ('block : OPEN_PARANTHESIS CLOSE_PARANTHESIS', lambda lp, rp: '()'),
    ('block : OPEN_PARANTHESIS expr CLOSE_PARANTHESIS',
     lambda lp, body, rp: body),
    ('atom : TEXT', lambda leaf: leaf),
    ('atom : DIGITS', lambda leaf: leaf),
    ('expr : atom expr', lambda head, tail: (head, ) + tail),
    ('expr : atom', lambda head: (head, )),
    ('atom : block', lambda blk: blk),
]

# Build the lexer/parser pair from the rule tables declared above.
lexer = ox.make_lexer(lexer_rules)
parser = ox.make_parser(parser_rules, tokens)


@click.command()
@click.argument('lispfcktree', type=click.File('r'))
def ast(lispfcktree):
    """Create ast."""
    # Read the whole source file, tokenize, parse and pretty-print the tree.
    lispcode = lispfcktree.read()
    tokens = lexer(lispcode)
    tree = parser(tokens)
    pprint.pprint(tree)


if __name__ == '__main__':
    ast()
示例#18
0
import ox
import click
from getch import getche

"""
Create tokens list and its lexer and parser
"""
# Lexer: numbers, words and parentheses; comments and whitespace produce
# COMMENT/NEWLINE tokens that the grammar's token list leaves out.
lexer = ox.make_lexer([
    ('NUMBER', r'\d+'),
    ('NAME', r'[-a-zA-Z]+'),
    ('LPARAN', r'[(]'),
    ('RPARAN', r'[)]'),
    ('COMMENT', r';.*'),
    ('NEWLINE', r'\s+'),
])

tokens_list = ['NAME', 'NUMBER', 'LPARAN', 'RPARAN']

# Grammar: a statement is a parenthesised, possibly empty, list of terms;
# numbers become ints and nested statements count as terms.
parser = ox.make_parser([
    ('stmt : LPARAN RPARAN', lambda lp, rp: '()'),
    ('stmt : LPARAN expr RPARAN', lambda lp, body, rp: body),
    ('expr : term expr', lambda head, tail: [head] + tail),
    ('expr : term', lambda head: [head]),
    ('term : stmt', lambda nested: nested),
    ('term : NUMBER', lambda digits: int(digits)),
    ('term : NAME', lambda word: word),
], tokens_list)

collection = [0]
p = 0
示例#19
0
import ox
import operator as operator

# \d matches digits 0-9
lexer = ox.make_lexer([
    ('NUMBER', r'\d+(\.\d*)?'),
    ('OP_S', r'[-+]'),
    ('OP_M', r'[*/]'),
    ('OP_P', r'[\^]'),
    ('PAR_O', r'\('),
    ('PAR_C', r'\)'),
])

tokens_list = ['NUMBER', 'OP_S', 'OP_M', 'OP_P', 'PAR_O', 'PAR_C']
# Reorder an infix production into a prefix (op, lhs, rhs) tuple.
infix = lambda x, op, y: (op, x, y)
# Wrap a NUMBER token as a tagged float leaf.
atom = lambda x: ('atom', float(x))

# Precedence, loosest to tightest: OP_S < OP_M < OP_P.
# NOTE(review): PAR_O/PAR_C are lexed but no grammar rule consumes them --
# confirm whether parentheses were meant to be supported.
parser = ox.make_parser([
    ('expr : expr OP_S term', infix),
    ('expr : term', lambda x: x),
    ('term : term OP_M res', infix),
    ('term : res', lambda x: x),
    ('res : res OP_P atom', infix),
    ('res : atom', lambda x: x),
    ('atom : NUMBER', atom)
], tokens_list)

OP_TO_FUNC = {
    '+': lambda x, y: x + y,
    '-': lambda x, y: x - y,
    '*': lambda x, y: x * y,
示例#20
0
import ox
import operator as op


# Operator symbol -> arithmetic function used when evaluating the AST.
OP_TO_FUNC = {
    '+': op.add,
    '-': op.sub,
    '*': op.mul,
    '/': op.truediv,
}


# Lexer: numbers plus two operator classes so the grammar can encode
# precedence (TERM_OP binds tighter than EXP_OP).
lexer = ox.make_lexer([
    ('NUMBER', r'\d+(\.\d*)?'),
    ('TERM_OP', r'[*/]'),
    ('EXP_OP', r'[-+]'),
    ('LPAREN', r'\('),
    ('RPAREN', r'\)')
])


# Grammar producing (operator, lhs, rhs) tuples with ('atom', text) leaves.
# NOTE(review): LPAREN/RPAREN are lexed but no grammar rule consumes them
# and they are missing from the token list -- confirm parentheses support.
parser = ox.make_parser([
    ('expr : expr EXP_OP term', lambda x, op, y: (op, x, y)),
    ('term : term TERM_OP value', lambda x, op, y: (op, x, y)),
    ('expr : term', lambda x: x),
    ('term : value', lambda x: x),
    ('value : NUMBER', lambda x: ('atom', x)),
], ['NUMBER', 'TERM_OP', 'EXP_OP'])


def eval(ast):
    head, *tail = ast
    if head == 'atom':
        return float(tail[0])
    else:
        x, y = tail
示例#21
0
 def simple_lexer(self, rules):
     """Build a lexer using the pure-Python 'simple' backend."""
     return ox.make_lexer(rules, which='simple')
示例#22
0
import ox
import click
import pprint

# Lexer mapping word-based commands onto brainfuck concepts.
# NOTE(review): COMMENT/NEWLINE/SPACE lack the ignore_ prefix and are not
# in `tokens` -- confirm how they are filtered before parsing.
lexer = ox.make_lexer([
    ('RIGHT', r'right'),  # > in brainfuck
    ('LEFT', r'left'),  # < in brainfuck
    ('INC', r'inc'),  # + in brainfuck
    ('DEC', r'dec'),  # - in brainfuck
    ('PRINT', r'print'),  # . in brainfuck
    ('READ', r'read'),  # , in brainfuck
    ('DO', r'do'),
    ('ADD', r'add'),
    ('SUB', r'sub'),
    ('LOOP', r'loop'),  # [] in brainfuck
    ('DEF', r'def'),
    ('NUMBER', r'\d+'),
    ('PARENTESE_A', r'\('),
    ('PARENTESE_F', r'\)'),
    ('NAME', r'[-a-zA-Z]+'),
    ('COMMENT', r';.*'),
    ('NEWLINE', r'\n'),
    ('SPACE', r'\s+')
])

# Token names the parser consumes (COMMENT/NEWLINE/SPACE excluded).
tokens = [
    'RIGHT', 'LEFT', 'INC', 'DEC', 'SUB', 'ADD', 'NUMBER', 'PRINT', 'LOOP',
    'READ', 'DEF', 'PARENTESE_F', 'PARENTESE_A', 'DO', 'NAME'
]

operator = lambda type_op: (type_op)
    FCall = sk.opt(name=str, fargs=list)


Number = Expr.Number
BinOp = Expr.BinOp
FCall = Expr.FCall


#
# Calc lexer
#
lexer = ox.make_lexer([
    ('NUMBER', r'\d+(\.\d+)?'),
    ('STRING', r'"[^"]*"'),
    ('OP', r'[-+*/<>=?@&$^~%]+'),
    ('COMMENT', r'\#[^\n]*'),
    ('LPAR', r'\('),
    ('RPAR', r'\)'),
    ('FNAME', r'[a-z]+'),
    ('COMMA', r'\,'),
])

tokens = ['NUMBER', 'OP', 'LPAR', 
          'RPAR', 'FNAME', 'COMMA']

#
# Calc parser
#
identity = (lambda x: x)
op_call = (lambda x, op, y: BinOp(op, x, y))
fcall = (lambda x, y, z, w: FCall(x, z))
示例#24
0
import ox
import click
import pprint

# Lexer: parens, operator words and numbers; comments and whitespace are
# dropped via the ignore_ prefix.
lexer = ox.make_lexer([
    ('OPEN_P', r'\('),
    ('CLOSE_P', r'\)'),
    ('OP', r'[-a-zA-Z]+'),
    ('NUMBER', r'[0-9]+'),
    ('ignore_COMMENT', r';[^\n]*'),
    ('ignore_NEWLINE', r'\s+'),
])

tokens = ['OP', 'NUMBER', 'OPEN_P', 'CLOSE_P']

# Semantic actions for the grammar below.
atom = lambda x: x  # leaf passes through unchanged
term = lambda x: (x, )  # single atom -> 1-tuple
comp = lambda x, y: (x, ) + y  # prepend an atom onto a term tuple
pare = lambda x, y: '()'  # empty parens
expr = lambda x, y, z: y  # unwrap a parenthesised term
parser = ox.make_parser([
    ('expr : OPEN_P term CLOSE_P', expr),
    ('expr : OPEN_P CLOSE_P', pare),
    ('term : atom term', comp),
    ('term : atom', term),
    ('atom : expr', atom),
    ('atom : OP', atom),
    ('atom : NUMBER', atom),
], tokens)

示例#25
0
"""
Implementa uma calculadora simples utilizando a biblioteca ox.
"""

import ox
import operator as op

# Operator symbol -> arithmetic function for the calculator.
OP_TO_FUNCTION = {
    '+': op.add,
    '-': op.sub,
    '*': op.mul,
    '/': op.truediv,
}

#
# Lexer
#
lexer = ox.make_lexer([
    ('NUMBER', r'[0-9]+(\.[0-9]+)?'),
    ('OP', r'[-+*/]'),
])

#
# Parser
#
# Folds `a OP b` immediately via OP_TO_FUNCTION, so no AST is built.
binop = (lambda a, op, b: OP_TO_FUNCTION[op](a, b))
# NOTE(review): unlike the other examples, no token list is passed to
# make_parser -- confirm ox accepts rule-only construction.
parser = ox.make_parser([
    ('term : term OP atom', binop),
    ('term : atom', lambda x: x),
    ('atom : NUMBER', float),
])

#
#
#
if __name__ == '__main__':
示例#26
0
from getch import getche
import click
import pprint
import ox

# Lexer for parenthesised tuples of names and numbers.
# NOTE(review): COMMENT and NEW_LINE are not ignore_-prefixed and are
# absent from token_list -- confirm they are filtered before parsing.
lexer = ox.make_lexer([
    ('COMMENT', r';(.)*'),
    ('NEW_LINE', r'\n+'),
    ('OPEN_BRACKET', r'\('),
    ('CLOSE_BRACKET', r'\)'),
    ('NAME', r'[a-zA-Z_][a-zA-Z_0-9-]*'),
    ('NUMBER', r'\d+(\.\d*)?'),
])

token_list = [
    'NAME',
    'NUMBER',
    'OPEN_BRACKET',
    'CLOSE_BRACKET',
]

# Pass-through semantic action for unit grammar rules.
identity = lambda x: x

parser = ox.make_parser([
    ('tuple : OPEN_BRACKET elements CLOSE_BRACKET', lambda a, x, b: x),
    ('tuple : OPEN_BRACKET CLOSE_BRACKET', lambda a, b: '[]'),
    ('elements : term elements', lambda x, xs: [x] + xs),
    ('elements : term', lambda x: [x]),
    ('term : atom', identity),
    ('term : tuple', identity),
    ('atom : NAME', identity),
示例#27
0
import ox
import click
import pprint
from lisp_interpreter import LispInterpreter

#Define lexer rules
# NOTE(review): COMMENT/NEWLINE/SPACE lack the ignore_ prefix and are not
# in `tokens` -- confirm they are filtered before reaching the parser.
lisp_lexer = ox.make_lexer([('COMMENT', r';.*'), ('NEWLINE', r'\n'),
                            ('SPACE', r'\s+'), ('NAME', r'[-a-zA-Z]+'),
                            ('NUMBER', r'\d+'), ('PARENT_OPEN', r'\('),
                            ('PARENT_CLOSE', r'\)')])

#Define tokens
tokens = ['PARENT_CLOSE', 'PARENT_OPEN', 'NUMBER', 'NAME']


# Semantic actions used by the parser rules.

def name(name):
    """Pass a NAME token through unchanged."""
    return name


def number(number):
    """Convert a NUMBER token to int."""
    return int(number)


def single_operator(operator):
    """Pass a lone operator through unchanged."""
    return operator


def parent_expr(parent_open, expr, parent_close):
    """Unwrap a parenthesised expression, dropping the paren tokens."""
    return expr
示例#28
0
import ox
import click
import pprint

# Lexer for the word-based brainfuck dialect; comments, newlines and
# whitespace are dropped via the ignore_ prefix.
lexer = ox.make_lexer([
    ('LOOP', r'loop'),
    ('DEC', r'dec'),
    ('INC', r'inc'),
    ('LPAR', r'\('),
    ('RPAR', r'\)'),
    ('RIGHT', r'right'),
    ('LEFT', r'left'),
    ('PRINT', r'print'),
    ('READ', r'read'),
    ('DO', r'do'),
    ('DO_AFTER', r'do-after'),
    ('DO_BEFORE', r'do-before'),
    ('ADD', r'add'),
    ('SUB', r'sub'),
    ('NUMBER', r'[0-9]+'),
    ('ignore_COMMENT', r';[^\n]*'),
    ('ignore_BREAK_LINE', r'\n'),
    ('ignore_SPACE', r'\s+')
])

tokens_list = ['LOOP',
               'DEC',
               'INC',
               'LPAR',
               'RPAR',
               'RIGHT',
示例#29
0
import ox #biblioteca do parse -- Só funciona em python3
import  operator as op 

#Criar lexer : Função que recebe string de código e retorna tokens (Recebe lista de tuplas - nome do token e expressão regular)

# Lexer: numbers plus additive (OP_S) and multiplicative (OP_M) operators.
lexer = ox.make_lexer([
    ('NUMBER', r'\d+(\.\d*)?'),  # \d = [0-9]
    ('OP_S', r'[-+]'),
    ('OP_M', r'[*/]'),
])

# Função recebe o token jogado na funcao func, retorna objeto  que é associado à "átomo"
#Converte string func = float em número par uso posterior

# Token names consumed by the grammar below.
# Fix: the list said ['NUMBER', 'OP'], but the lexer above produces
# OP_S/OP_M and the grammar references OP_S/OP_M -- 'OP' does not exist.
tokens_list = ['NUMBER', 'OP_S', 'OP_M']
# `infix` reorders an infix production into a prefix (op, lhs, rhs) tuple;
# `atom` tags a NUMBER token as a float leaf of the syntax tree.
infix = lambda x, op, y: (op, x, y)
atom = lambda x: ('atom', float(x))

# Grammar with OP_M binding tighter than OP_S.
parser = ox.make_parser([
    ('expr : expr OP_S term', infix),
    ('expr : term', lambda x: x),
    ('term : term OP_M atom', infix),
    ('term : atom', lambda x: x),
    ('atom : NUMBER', atom),
], tokens_list)

# ------------------------FUNÇÕES ----------------------------------------------------------------------------

OP_TO_FUNC = {
	'+': lambda x , y : x + y,
示例#30
0
import ox

# Lexer.  Fixes: the NUMBER pattern r'[0-9]+(\.[0-9]+?)' made the decimal
# part mandatory (and needlessly non-greedy); the optional group must be
# (\.[0-9]+)?.  The operator rules are merged into the single 'OP' token
# that the token list and grammar below actually reference -- the original
# OP_PRI/OP_SEC names were used nowhere.
lexer = ox.make_lexer([('NUMBER', r'[0-9]+(\.[0-9]+)?'),
                       ('OP', r'[-+*/]')])

# Operator symbol -> arithmetic function.
OPERATORS = {
    '+': lambda a, b: a + b,
    '-': lambda a, b: a - b,
    '*': lambda a, b: a * b,
    '/': lambda a, b: a / b,
}

# The grammar folds each operation eagerly via OPERATORS, so parsing an
# expression yields its numeric value directly.
tokens = ['NUMBER', 'OP']
parser = ox.make_parser([
    ('term : term OP atom', lambda a, op, b: OPERATORS[op](a, b)),
    ('term : atom', lambda x: x),
    ('atom : NUMBER', float),
], tokens)

# Simple one-shot driver: read an expression, show its tokens and result.
st = input('expr: ')
tokens = lexer(st)
print(lexer(st))

res = parser(tokens)
print(res)