예제 #1
0
from lexer import Lexer

# Read the C source to tokenize; 'with' guarantees the handle is closed
# even if read() raises (the original open()/close() pair leaked on error).
with open("test.c", "r") as fi:
    toTokenize = fi.read()

# Build the lexer, register its token definitions, and dump the token stream.
lex = Lexer()
lex.setTokens()
tokens = lex.tokenize(toTokenize)
print(tokens)
예제 #2
0
from lexer import Lexer
from parser_ import Parser
from interpreter import Interpreter
import matplotlib.pyplot as plt


while True:
	try:
		# Formula entered once; evaluated for each sampled x below.
		text = input("calc > ")
		# list values
		list_x, list_y = [], []
		# quantity_point
		n = int(input('Quantity point = '))
		for i in range(n):
			value_x = input('x = ')
			# Lexer receives the formula, the current x, and the x list so far.
			lexer = Lexer(text, value_x, list_x)
			list_x = lexer.Valuex()
			tokens = lexer.generate_tokens()
			parser = Parser(tokens)
			tree = parser.parse()
			if not tree: continue
			interpreter = Interpreter()
			value = interpreter.visit(tree)
			list_y.append(value)
			print('y = ', value)
		print(list_x)
		print(list_y)

		# Plot the sampled (x, y) pairs.
		plt.plot(list_x, list_y)
		plt.show()
		# NOTE(review): the matching 'except' clause for this 'try' is not
		# visible in this excerpt — the source appears truncated here.
예제 #3
0
import os

from lexer import Lexer
from parse import Parser
from version import *

# (fix: 'import os' was duplicated in the original — kept a single import)

# Clear the terminal ('cls' on Windows, 'clear' elsewhere).
os.system('cls' if os.name == 'nt' else 'clear')

print("equationsolver@{}".format(version))

# REPL: lex, parse, and print the AST of each entered equation.
while True:
    text = input('solve > ')
    lexer = Lexer(text)
    tokens = lexer.generate_tokens()
    ast = Parser(tokens).parse()
    if not ast: continue
    print(ast)
예제 #4
0
from lexer import Lexer
from parsers import Parser

if __name__ == "__main__":
    # 'with' closes the file even if read() raises (the original
    # open()/close() pair leaked the handle on error).
    with open('test.txt') as file:
        text_input = file.read()

    # Build the PLY-style lexer and feed it the input.
    lexer = Lexer().build()
    lexer.input(text_input)
    # while True:
    #     tok = lexer.token()
    #     if not tok:
    #         break
    #     print(tok)

    # Build the parser and parse the same text (False: no debug output).
    parser_p = Parser()
    parser_p.build().parse(text_input, lexer, False)
예제 #5
0
File: parser.py  Project: ysp80/elasticsql
 def __init__(self):
   """Initialize the parser: take token names from a Lexer and build the parser tables."""
   super(Parser, self).__init__()
   #self.arg = arg
   # yacc-style parsers require a 'tokens' attribute naming all token types.
   lexer = Lexer()
   self.tokens = lexer.tokens
   self.build()
예제 #6
0
def main():
    """Read one line of input, lex and parse it, then evaluate and print the result."""
    text = input()
    lexer = Lexer(text)
    ast = Parser(lexer)
    # NOTE(review): eval() is called on a Parser object — presumably relying
    # on its string form or a custom protocol; eval of user-derived input is
    # also a security risk. Verify this is intended.
    print(eval(ast))
예제 #7
0
 def __init__(self, origin):
     """Wrap *origin* in a Lexer and advance to the first token."""
     self.tokens = Lexer(origin) 
     self.tokens.selectNext() 
예제 #8
0
 def test_lexer_2(self):
     """'!Q' lexes to [NOT, ID]."""
     l = Lexer('!Q').tokenize()
     self.assertEqual(l.kind, [TokenKind.NOT, TokenKind.ID])
예제 #9
0
 def test_parser_10(self):
     """A valid token stream with an unbalanced context reports a syntax error with position."""
     tokelist = Lexer('(A/\B)\/C').tokenize()
     parseTree = Parser().parse(tokelist)
     self.assertEqual(parseTree, 'Syntax Error at line 1 column 6.')
예제 #10
0
        # 'B\/(C),A/\!B': two comma-separated propositions using OR, AND,
        # NOT, parentheses, and IDs.
        tokelist = Lexer('B\/(C),A/\!B').tokenize()
        parse_tree = Parser().parse(tokelist)
        # Expected parse tree as a pre-order list of rule/token names.
        self.assertEqual(parse_tree, [
            'propositions', 'proposition', 'compound', 'atomic', 'ID',
            'connective', 'OR', 'proposition', 'compound', 'LPAR',
            'proposition', 'atomic', 'ID', 'RPAR', 'more-proposition', 'comma',
            'propositions', 'proposition', 'compound', 'atomic', 'ID',
            'connective', 'AND', 'proposition', 'compound', 'NOT',
            'proposition', 'atomic', 'ID', 'more-proposition', 'epsilon'
        ])


#print sys.argv

# NOTE: this block uses Python 2 print statements.
#if run as just main.py do unit tests
if __name__ == '__main__' and len(sys.argv) == 1:
    unittest.main()
#else take the input file and run it through lexer and parser
else:
    with open(sys.argv[1], 'r') as file:
        for index, line in enumerate(file):
            print line
            l = Lexer(line)
            # Track 1-based line numbers for error reporting.
            l.line = index + 1
            tokelist = l.tokenize()
            print tokelist.kind
            # NOTE(review): l.line was already set to index + 1 above and is
            # incremented again here — possibly double-counting; confirm.
            l.line += 1
            parse_tree = Parser().parse(tokelist)
            print parse_tree
            print "\n----------\n"
예제 #11
0
 def test_parser_7(self):
     """A stray ')' after '!Q' reports a syntax error at line 1 column 3."""
     tokelist = Lexer('!Q)P!').tokenize()
     parseTree = Parser().parse(tokelist)
     self.assertEqual(parseTree, 'Syntax Error at line 1 column 3.')
예제 #12
0
def code_to_s_expr(code):
    """Lex *code*, parse it as an expression, and render the tree as an s-expression."""
    tokens = Lexer(code).lex()
    tree = Parser(tokens).expression()
    return s_expr(tree)
예제 #13
0
from lexer import Lexer

# One token class 'A' matching a single 'a' or 'b'; 'eof' names the
# end-of-input token.
lexer = Lexer([('A', '(a|b)')], 'eof')

tokens = lexer('ab')
# Render the lexer's automaton to a PNG for inspection — presumably a
# DFA/NFA visualization; verify against the lexer module.
lexer.automaton.graph().write_png('a.png')
print(tokens)
예제 #14
0
def build_python_lexer():
    """Assemble a Lexer for Python source from the module token tables and a freshly built DFA."""
    return Lexer(token_types, build_dfa(), ignored_chars)
예제 #15
0
# Author: Yiyang Zeng yz3622
from lexer import Lexer
from parser import ProjectParser
from ast_nodes import errors, declarations, usage, print_queue
import os

# Absolute directory containing this script.
# NOTE(review): 'dir' shadows the builtin dir() in this module.
dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
scanner = Lexer()
# NOTE(review): 'global' at module level is a no-op; ast is simply a module global.
global ast
ast = None


def scan_file(fname, test_name):
    """Lex, parse, and evaluate the file *fname*, then report declarations.

    Stores the resulting AST in the module-level ``ast`` global.
    NOTE(review): the body appears truncated at the end of this excerpt
    (the "Variable Usage" header has no follow-up here).
    """
    print("-----------------------------Running " + test_name +
          "-----------------------------")

    # Read the file
    with open(fname) as f:
        code = f.read()

    # lex the source code
    tokens = scanner.input(code)
    parser = ProjectParser()
    global ast
    ast = parser.input(tokens)
    # eval() populates the module-level 'declarations' list read below.
    ast.eval()
    print("--------------------Declarations--------------------")
    for scope, item, line, item_type in declarations:
        print("%s: declare \"%s\", %s %s" %
              (line, item, scope, str(item_type)))
    print("--------------------Variable Usage--------------------")
예제 #16
0
 def test_lexer_3(self):
     """'P <=> Q' lexes to [ID, IFF, ID]."""
     l = Lexer('P <=> Q').tokenize()
     self.assertEqual(l.kind, [TokenKind.ID, TokenKind.IFF, TokenKind.ID])
예제 #17
0
            "help", "delete-tautologies", "forward-subsumption",
            "backward-subsumption"
            "given-clause-heuristic=", "neg-lit-selection="
        ])
    except getopt.GetoptError as err:
        print(sys.argv[0], ":", err)
        sys.exit(1)

    params = processOptions(opts)

    problem = ClauseSet()
    for file in args:
        fp = open(file, "r")
        input = fp.read()
        fp.close()
        lex = Lexer(input)
        problem.parse(lex)

    state = ProofState(params, problem)
    res = state.saturate()

    print(state.statisticsStr())
    if res != None:
        print("# SZS status Unsatisfiable")
        proof = res.orderedDerivation()
        enableDerivationOutput()
        for s in proof:
            print(s)
        disableDerivationOutput()
    else:
        print("# SZS status Satisfiable")
예제 #18
0
 def test_lexer_4(self):
     """'( P /\ Q )' lexes to [LPAR, ID, AND, ID, RPAR]."""
     l = Lexer('( P /\ Q )').tokenize()
     self.assertEqual(l.kind, [
         TokenKind.LPAR, TokenKind.ID, TokenKind.AND, TokenKind.ID,
         TokenKind.RPAR
     ])
예제 #19
0
    # Grammar rules in SLY style (presumably — the @_ decorator and p.<name>
    # access match sly.Parser): each same-named method adds an alternative
    # production for 'expression'.
    @_("brack_expression")
    def expression(self, p):
        return p.brack_expression

    @_("paren_expression")
    def expression(self, p):
        return p.paren_expression

    @_("brace_expression")
    def expression(self, p):
        return p.brace_expression

    @_("quoted_expression")
    def expression(self, p):
        return p.quoted_expression

    # Operator precedence/associativity declarations for the parser.
    precedence = [
        ("right", QUOTE),
        ("right", PIPE),
        ("left", LBRACK),
    ]


if __name__ == "__main__":
    lexer = Lexer()
    parser = Parser()

    def parse(txt):
        """Tokenize *txt* with the module lexer and parse the resulting token stream."""
        return parser.parse(lexer.tokenize(txt))
예제 #20
0
 def test_lexer_6(self):
     """')Q' lexes to [RPAR, ID]."""
     l = Lexer(')Q').tokenize()
     self.assertEqual(l.kind, [TokenKind.RPAR, TokenKind.ID])
예제 #21
0
class Parser:
    """PLY yacc grammar for a small C-like language.

    Each p_* method's docstring is a grammar production consumed by
    ply.yacc; the bodies currently only trace which rule fired.
    Token names are taken from the companion Lexer.
    """

    tokens = Lexer().tokens

    def __init__(self):
        pass

    def p_program(self, p):
        """program : begin"""
        print("program : begin")

    def p_start_1(self, p):
        """begin : declist MAIN LRB RRB block"""
        print("begin : declist MAIN LRB RRB block")

    def p_start_2(self, p):
        """begin : MAIN LRB RRB block"""
        print("begin : MAIN LRB RRB block")

    def p_declist_dec(self, p):
        """declist : dec"""
        print("declist : dec")

    def p_declist_declist(self, p):
        """declist : declist dec"""
        print("declist : declist dec")

    def p_dec_vardec(self, p):
        """dec : vardec"""
        print("dec : vardec")

    def p_dec_funcdec(self, p):
        """dec : funcdec"""
        print("dec : funcdec")

    def p_type_int(self, p):
        """type : INTEGER"""
        print("type : INTEGER")

    def p_type_float(self, p):
        """type : FLOAT"""
        print("type : FLOAT")

    def p_type_bool(self, p):
        """type : BOOLEAN"""
        print("type : BOOLEAN")

    def p_iddec_id(self, p):
        """iddec : ID"""
        print("iddec : ID")

    def p_iddec_id_s_exp(self, p):
        """iddec : ID LSB exp RSB"""
        print("iddec : ID LSB exp RSB")

    def p_iddec_id_assign_exp(self, p):
        """iddec : ID ASSIGN exp"""
        print("iddec : ID ASSIGN exp")

    def p_idlist_iddec(self, p):
        """idlist : iddec"""
        print("idlist : iddec")

    def p_idlist_idlist(self, p):
        """idlist : idlist COMMA iddec"""
        print("idlist : idlist COMMA iddec")

    def p_vardec(self, p):
        """vardec : type idlist SEMICOLON"""
        print("vardec : type idlist SEMICOLON")

    def p_funcdec_type(self, p):
        """funcdec : type ID LRB paramdecs RRB block"""
        print("funcdec : type ID LRB paramdecs RRB block")

    def p_funcdec_type_2(self, p):
        """funcdec : type ID LRB RRB block"""
        print("funcdec : type ID LRB RRB block")

    def p_funcdec_void(self, p):
        """funcdec : VOID ID LRB paramdecs RRB block"""
        print("funcdec : VOID ID LRB paramdecs RRB block")

    def p_funcdec_void_2(self, p):
        """funcdec : VOID ID LRB RRB block"""
        print("funcdec : VOID ID LRB RRB block")

    def p_paramdecs_list(self, p):
        """paramdecs : paramdecslist"""
        print("paramdecs : paramdecslist")

    def p_paramdecslist_paramdec(self, p):
        """paramdecslist : paramdec"""
        print("paramdecslist : paramdec")

    def p_paramdecslist_paramdecslist(self, p):
        """paramdecslist : paramdecslist COMMA paramdec"""
        print("paramdecslist : paramdecslist COMMA paramdec")

    def p_paramdec(self, p):
        """paramdec : type ID"""
        print("paramdec : type ID")

    def p_paramdec_s(self, p):
        """paramdec : type ID LSB RSB"""
        print("paramdec : type ID LSB RSB")

    def p_varlist_vardec(self, p):
        """varlist : vardec"""
        print("varlist : vardec")

    def p_varlist_varlist(self, p):
        """varlist : varlist vardec"""
        print("varlist : varlist vardec")

    def p_block(self, p):
        """block : LCB varlist stmtlist RCB"""
        print("block : LCB varlist stmtlist RCB")

    def p_block_varlist(self, p):
        """block : LCB varlist RCB"""
        print("block : LCB varlist RCB")

    def p_block_stmtlist(self, p):
        """block : LCB stmtlist RCB"""
        print("block : LCB stmtlist RCB")

    def p_block_empty(self, p):
        """block : LCB RCB"""
        print("block : LCB RCB")

    def p_stmtlist_stmt(self, p):
        """stmtlist : stmt"""
        print("stmtlist : stmt")

    def p_stmtlist_stmtlist(self, p):
        """stmtlist : stmtlist stmt"""
        print("stmtlist : stmtlist stmt")

    def p_lvalue_id(self, p):
        """lvalue : ID"""
        print("lvalue : ID")

    def p_lvalue_exp(self, p):
        """lvalue : ID LSB exp RSB"""
        print("lvalue : ID LSB exp RSB")

    def p_stmt_return(self, p):
        """stmt : RETURN exp SEMICOLON"""
        print("stmt : RETURN exp SEMICOLON")

    def p_stmt_exp(self, p):
        """stmt : exp SEMICOLON"""
        print("stmt : exp SEMICOLON")

    def p_stmt_block(self, p):
        """stmt : block"""
        print("stmt : block")

    def p_stmt_while(self, p):
        """stmt : WHILE LRB exp RRB stmt"""
        print("stmt : WHILE LRB exp RRB stmt")

    def p_stmt_for(self, p):
        """stmt : FOR LRB exp SEMICOLON exp SEMICOLON exp RRB stmt"""
        print("stmt : FOR LRB exp SEMICOLON exp SEMICOLON exp RRB stmt")

    def p_stmt_if(self, p):
        """stmt : IF LRB exp RRB stmt elseiflist %prec p2"""
        print("stmt : IF LRB exp RRB stmt elseiflist")

    def p_stmt_if_elseif(self, p):
        """stmt : IF LRB exp RRB stmt %prec p1"""
        print("stmt : IF LRB exp RRB stmt")

    def p_stmt_if_long(self, p):
        """stmt : IF LRB exp RRB stmt elseiflist ELSE stmt"""
        print("stmt : IF LRB exp RRB stmt elseiflist ELSE stmt")

    def p_stmt_if_long_long(self, p):
        """stmt : IF LRB exp RRB stmt ELSE stmt"""
        print("stmt : IF LRB exp RRB stmt ELSE stmt")

    def p_stmt_print(self, p):
        """stmt : PRINT LRB ID RRB SEMICOLON"""
        print("stmt : PRINT LRB ID RRB SEMICOLON")

    def p_elseiflist_elif(self, p):
        """elseiflist : ELIF LRB exp RRB stmt"""
        print("elseiflist : ELIF LRB exp RRB stmt")

    def p_elseiflist_elseiflist(self, p):
        """elseiflist : elseiflist ELIF LRB exp RRB stmt"""
        print("elseiflist : elseiflist ELIF LRB exp RRB stmt")

    def p_exp_lvalue_exp(self, p):
        """exp : lvalue ASSIGN exp"""
        print("exp : lvalue ASSIGN exp")

    def p_exp_sum(self, p):
        """exp : exp SUM exp"""
        print("exp : exp SUM exp")

    def p_exp_sub(self, p):
        """exp : exp SUB exp"""
        print("exp : exp SUB exp")

    def p_exp_mul(self, p):
        """exp : exp MUL exp"""
        print("exp : exp MUL exp")

    def p_exp_div(self, p):
        """exp : exp DIV exp"""
        print("exp : exp DIV exp")

    def p_exp_mod(self, p):
        """exp : exp MOD exp"""
        print("exp : exp MOD exp")

    def p_exp_or(self, p):
        """exp : exp OR exp"""
        print("exp : exp OR exp")

    def p_exp_and(self, p):
        """exp : exp AND exp"""
        print("exp : exp AND exp")

    def p_exp_const(self, p):
        """exp : const"""
        print("exp : const")

    def p_exp_lvalue(self, p):
        """exp : lvalue"""
        print("exp : lvalue")

    def p_exp_id_explist(self, p):
        """exp : ID LRB explist RRB"""
        print("exp : ID LRB explist RRB")

    def p_exp_r_exp(self, p):
        """exp : LRB exp RRB"""
        print("exp : LRB exp RRB")

    def p_exp_id(self, p):
        """exp : ID LRB RRB"""
        print("exp : ID LRB RRB")

    def p_exp_sub_exp(self, p):
        """exp : SUB exp"""
        print("exp : SUB exp")

    def p_exp_not_exp(self, p):
        """exp : NOT exp"""
        print("exp : NOT exp")

    def p_const_int(self, p):
        """const : INTEGERNUMBER"""
        print("const : INTEGERNUMBER")

    def p_const_float(self, p):
        """const : FLOATNUMBER"""
        print("const : FLOATNUMBER")

    def p_const_true(self, p):
        """const : TRUE"""
        print("const : TRUE")

    def p_const_false(self, p):
        """const : FALSE"""
        print("const : FALSE")

    def p_relop_gt(self, p):
        """exp : exp GT exp"""
        print("exp : exp GT exp")

    def p_relop_lt(self, p):
        """exp : exp LT exp"""
        print("exp : exp LT exp")

    def p_relop_ne(self, p):
        """exp : exp NE exp"""
        print("exp : exp NE exp")

    def p_relop_eq(self, p):
        """exp : exp EQ exp"""
        print("exp : exp EQ exp")

    def p_relop_le(self, p):
        """exp : exp LE exp"""
        print("exp : exp LE exp")

    def p_relop_ge(self, p):
        """exp : exp GE exp"""
        print("exp : exp GE exp")

    def p_explist_exp(self, p):
        """explist : exp"""
        print("explist : exp")

    def p_explist_explist(self, p):
        """explist : explist COMMA exp"""
        print("explist : explist COMMA exp")

    # Associativity/precedence, lowest first; p1/p2 are fictitious tokens
    # used to resolve the dangling-else ambiguity via %prec above.
    precedence = (
        ('left', 'COMMA'),
        ('right', 'ASSIGN'),
        ('right', 'NOT'),
        ('left', 'OR'),
        ('left', 'AND'),
        ('left', 'LT'),
        ('left', 'GT'),
        ('left', 'GE'),
        ('left', 'NE'),
        ('left', 'EQ'),
        ('left', 'LE'),
        ('left', 'p1'),
        ('left', 'p2'),
        ('left', 'ELSE'),
        ('left', 'ELIF'),
        ('left', 'SUM', 'SUB'),
        ('left', 'MUL', 'DIV', 'MOD'),
    )

    def p_error(self, p):
        """Report a syntax error.

        BUG FIX: PLY calls p_error(None) at unexpected end of input; the
        original unconditionally read p.value and crashed with
        AttributeError instead of reporting the parse error.
        """
        if p is None:
            raise Exception('ParsingError: unexpected end of input')
        print(p.value)
        raise Exception('ParsingError: invalid grammar at ', p)

    def build(self, **kwargs):
        """build the parser"""
        self.parser = yacc.yacc(module=self, **kwargs)
        return self.parser
예제 #22
0
 def test_lexer_7(self):
     """'!Q)P!' lexes to [NOT, ID, RPAR, ID, NOT] — lexing ignores grammar validity."""
     l = Lexer('!Q)P!').tokenize()
     self.assertEqual(l.kind, [
         TokenKind.NOT, TokenKind.ID, TokenKind.RPAR, TokenKind.ID,
         TokenKind.NOT
     ])
예제 #23
0
        # XLABEL/YLABEL: evaluate the operand and set the matching axis label.
        elif node.op.type == XLABEL:
            viewer.xlabel(str(self.visit(node.value)))
            return
        elif node.op.type == YLABEL:
            viewer.ylabel(str(self.visit(node.value)))
            return
        # Any other op type is unsupported.
        self.exception("{} not implemented".format(node))

    def visit_Op(self, node):
        """Visit an Op node; SHOW is the only operation implemented."""
        if node.op.type == SHOW:
            viewer.show()
        else:
            self.exception("{} not implemented".format(node))

    def visit_Val(self, node):
        """Visit a Value node: literals yield their value, names are resolved in the global scope."""
        if node.token.type in (INTEGER, STRING):
            return node.value

        # Anything else is a name lookup; unknown names are reported.
        if node.value not in self.global_scope:
            self.exception("Could not dereference {}".format(node))
        return self.global_scope[node.value]


if __name__ == "__main__":
    # Demo program exercising assignment, axis labels, print, plot, and show.
    input_text = 'BEGIN a = d20; xlabel "t1"; ylabel "t2"; print a; label "test"; plot a; show; END'
    ast = DiceParser(Lexer(input_text)).parse()
    print(ast)
    interpreter = Interpreter(ast)
    result = interpreter.interpret()
예제 #24
0
 def test_lexer_9(self):
     """'Q\./P' lexes to [ID, XOR, ID]."""
     l = Lexer('Q\./P').tokenize()
     self.assertEqual(l.kind, [TokenKind.ID, TokenKind.XOR, TokenKind.ID])
예제 #25
0
 def eval_infix(self, text):
     """Tokenize *text*, prime the first token, and evaluate it as an infix expression."""
     print(text)
     self.lexer = Lexer(text)
     # Parsing methods consume current_token and pull the next from the lexer.
     self.current_token = self.lexer.get_next_token()
     return self.multi_expr()
예제 #26
0
 def test_lexer_10(self):
     """'(A/\B)\/C' lexes to [LPAR, ID, AND, ID, RPAR, OR, ID]."""
     l = Lexer('(A/\B)\/C').tokenize()
     self.assertEqual(l.kind, [
         TokenKind.LPAR, TokenKind.ID, TokenKind.AND, TokenKind.ID,
         TokenKind.RPAR, TokenKind.OR, TokenKind.ID
     ])
예제 #27
0
from lexer import Lexer
from parser_ import Parser
from interpreter import Interpreter
import matplotlib.pyplot as plt

# Input equation, e.g. "y = a * x + b"; evaluated once per sampled x.
text = input("calc > ")
# list values
list_x, list_y = [], []
# quantity_point
n = int(input('Quantity point = '))
for i in range(n):
    value_x = input('x = ')
    # The lexer gets both the formula and the current x value.
    lexer = Lexer(text, value_x)
    list_x.append(float(value_x))
    tokens = lexer.generate_tokens()
    parser = Parser(tokens)
    tree = parser.parse()
    interpreter = Interpreter()
    value1 = interpreter.visit(tree)
    # value1 is a wrapper object; its .value is the numeric result.
    list_y.append(value1.value)
    print('y = ', value1)
# Plot the sampled points as red circles joined by lines.
plt.plot(list_x, list_y, "ro-")
plt.show()
예제 #28
0
from lexer import Lexer
from parser import Parser
import sys
import glob

# NOTE: Python 2 code ('print path' statement below).
pg = Parser()
pg.parse()
parser = pg.get_parser()
lexer = Lexer().get_lexer()
# Compile every .jack file in the given directory to a sibling .vm file.
for path in glob.glob(sys.argv[1] + "/*.jack"):
    # NOTE(review): splitting on "/" assumes POSIX paths; confirm Windows
    # support is not required.
    outputFile = sys.argv[1] + "/" + path.split("/")[-1] + ".vm"
    with open(path, "r") as f:
        print path
        text_input = f.read()
        #print text_input
        tokens = lexer.lex(text_input)
        oc = parser.parse(tokens).eval()
        # Drop empty lines from the generated VM code.
        oc = "\r\n".join(line for line in oc.split("\r\n") if len(line) > 0)
        #print(oc)
        with open(outputFile, 'w') as output_file:
            output_file.write(oc)
예제 #29
0
# Token table for the ~ATH lexer: (regex, token kind) pairs, tried in
# order; a kind of None discards the match (comments and whitespace).
# Order matters: longer/more specific patterns precede their prefixes
# (e.g. '<=' before '<', '/_' before '/').
ath_lexer = Lexer([
    (r'(?s)/\*.*?\*/', None),  # Multi-line comment
    (r'//[^\n]*', None),  # Single-line comment
    (r'\s+', None),  # Whitespace
    # Code enclosures
    (r'\(', 'DELIMITER'),  # Group open
    (r'\)', 'DELIMITER'),  # Group close
    (r'{', 'DELIMITER'),  # Suite open
    (r'}', 'DELIMITER'),  # Suite close
    (r'\[', 'DELIMITER'),  # Symbol open
    (r'\]', 'DELIMITER'),  # Symbol close
    # Separators
    (r';', 'DELIMITER'),  # Statement separator
    (r',', 'DELIMITER'),  # Group operator
    # Boolean operators
    (r'\bl&', 'OPERATOR'),  # Boolean AND
    (r'\bl\|', 'OPERATOR'),  # Boolean OR
    (r'\bl\^', 'OPERATOR'),  # Boolean XOR
    # Bitwise operators
    (r'\bb&', 'OPERATOR'),  # Bitwise and
    (r'\bb\|', 'OPERATOR'),  # Bitwise or
    (r'\bb\^', 'OPERATOR'),  # Bitwise xor
    # Arithmetic operators
    (r'\+', 'OPERATOR'),  # Add, UnaryPos
    (r'-', 'OPERATOR'),  # Sub, UnaryInv
    (r'\^', 'OPERATOR'),  # Pow
    (r'\*', 'OPERATOR'),  # Mul
    (r'/_', 'OPERATOR'),  # FloorDiv
    (r'/', 'OPERATOR'),  # TrueDiv
    (r'%', 'OPERATOR'),  # Modulo
    # Symbol operators
    (r'!=!', 'OPERATOR'),  # Assert Both
    (r'!=\?', 'OPERATOR'),  # Assert Left
    (r'\?=!', 'OPERATOR'),  # Assert Right
    (r'~=!', 'OPERATOR'),  # Negate Left
    (r'!=~', 'OPERATOR'),  # Negate Right
    (r'~=~', 'OPERATOR'),  # Negate Both
    # Bitwise shift operators
    (r'<<', 'OPERATOR'),  # Bitwise lshift
    (r'>>', 'OPERATOR'),  # Bitwise rshift
    # Value operators
    (r'<=', 'OPERATOR'),  # Less than or equal to
    (r'<', 'OPERATOR'),  # Less than
    (r'>=', 'OPERATOR'),  # Greater than or equal to
    (r'>', 'OPERATOR'),  # Greater than
    (r'~=', 'OPERATOR'),  # Not equal to
    (r'==', 'OPERATOR'),  # Equal to
    # Statement keywords
    (r'DIE', 'KEYWORD'),  # Kill symbol
    (r'~ATH', 'KEYWORD'),  # Loop
    (r'print', 'KEYWORD'),  # Output
    (r'input', 'KEYWORD'),  # Input
    (r'import', 'KEYWORD'),  # Import another file
    (r'EXECUTE', 'KEYWORD'),  # Subroutine execution
    (r'INSPECT', 'KEYWORD'),  # Debug
    (r'REPLICATE', 'KEYWORD'),  # Deep copy to current frame
    (r'PROCREATE', 'KEYWORD'),  # Value declaration or modification
    (r'ENUMERATE', 'KEYWORD'),  # Split a string
    (r'BIFURCATE', 'KEYWORD'),  # Split a symbol
    (r'AGGREGATE', 'KEYWORD'),  # Merge a symbol
    (r'FABRICATE', 'KEYWORD'),  # Subroutine declaration
    (r'DIVULGATE', 'KEYWORD'),  # Return a symbol
    (r'DEBATE', 'KEYWORD'),  # Conditional Consequent
    (r'UNLESS', 'KEYWORD'),  # Conditional Alternative
    # Inverters
    (r'!', 'OPERATOR'),  # Boolean NOT
    (r'~', 'OPERATOR'),  # Bitwise not
    # Literals and Identifiers
    (r'([\'"])(?:[^\1]|\\\1)*?\1', 'LITERAL_STR'),
    (r'(\d+\.(\d*)?|\.\d+)([eE][-+]?\d+)?[jJ]', 'LITERAL_IMG'),
    (r'(\d+\.(\d*)?|\.\d+)([eE][-+]?\d+)?', 'LITERAL_FLT'),
    (r'\d+[jJ]', 'LITERAL_IMG'),
    (r'\d{1,3}(?:_\d{1,3})*', 'LITERAL_INT'),
    (r'[a-zA-Z]\w*', 'IDENTIFIER'),
    # Literally only used in DIE calls
    (r'\.', 'DELIMITER'),
])
import unittest
from lexer import Lexer, TokenKind
from parser import Parser

# Lex and parse each proposition in input.txt, one per line.
# 'with' closes the file (the original open() handle was never closed);
# enumerate(start=1) replaces the manual linecount bookkeeping.
with open("input.txt") as f:
    for linecount, line in enumerate(f, start=1):
        print("*****************")
        print("Input #", linecount)
        line = line.replace("\n", "")
        print("Proposition: ", line)
        tokenlist = Lexer(line, linecount).tokenize()
        parse_tree = Parser().parse(tokenlist, line)
        print("*********************")

# line = "( P /\ ! Q ) , ( ! P <=> ! Q )"
# print("Proposition: ", line)
# tokenlist = Lexer(line, 0).tokenize()
# parse_tree = Parser().parse(tokenlist, line)
# print("*********************")

# class Test(unittest.TestCase):
#     def test1(self):
#         l = Lexer('Q').tokenize()
#         print(l, l.kind, l.loc, TokenKind.ID)
#         self.assertEqual(l.kind, [TokenKind.ID])
#
#     def test2(self):
#         tokenlist = Lexer('!Q').tokenize()
#         parse_tree = Parser(tokenlist).parse()