Example #1
def yield_parse_result(line1, line2):
    substitution_parser = Parser('{}v{var_name} = {value};')
    if (result := substitution_parser.parse(line1)) \
            and 'v' + result.named['var_name'] in line2 \
            and any(op in line2 for op in STREAM_OPERATORS):
        var_name, value = result.named['var_name'], result.named['value']
        return '', line2.replace('v' + var_name, value)
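For context, a minimal demo of the substitution rewrite above, assuming Parser comes from the `parse` library (as it does in later examples on this page) and that STREAM_OPERATORS is a set such as {'<<', '>>'} (an assumption; the snippet does not show its definition):

# Hypothetical demo; line1 declares vcount, line2 streams it.
from parse import Parser

STREAM_OPERATORS = {'<<', '>>'}
line1 = '    int vcount = 42;'
line2 = '    std::cout << vcount;'
result = Parser('{}v{var_name} = {value};').parse(line1)
if result and 'v' + result.named['var_name'] in line2:
    print(line2.replace('v' + result.named['var_name'],
                        result.named['value']))
# prints: "    std::cout << 42;"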
Example #2
def run():
    data = open_file(argv[1])
    lexer = Lexer(data)
    tokens = lexer.lex()

    parser = Parser(tokens)
    parser.parse()
Example #3
def yield_parse_result(line):
    std_operator_parser = Parser('{indent}std::operator{op}({params});')
    if (result := std_operator_parser.parse(line)) \
            and result.named['op'] in STREAM_OPERATORS:
        indent, op = result.named['indent'], ' ' + result.named['op']
        params = result.named['params']
        return indent + op.join(params.split(',')) + ';'
Example #4
    def solve(self, expr: str = ""):
        """solve cryptarithm problem"""
        print("Problem: {}".format(expr))

        p = Parser(tokenize(expr))
        pr = Problem(p.parse())
        print(pr.search_all_solution())
Example #5
class Generator:

    output = None
    parser = None

    is_generate_code = True

    def __init__(self):
        self.parser = Parser()


    def set_generate_code_flag(self, is_generate_code):
        self.is_generate_code = is_generate_code


    def start(self, contracts):
        for contract_path in contracts:
            parse_info = self.parser.parse(contract_path)
            self.generate_documentation(parse_info)


    def set_output(self, output):
        self.output = output


    def gen_header(self, f, info):
        f.write("# " + info.contract_name + "\n")
        #f.write("\n`Solidity version " + info.version + "`\n")


    def gen_public_functions(self, f, info):
        f.write("\n#### Functions\n")
        for function in info.functions:
            if function.internal or function.private: continue
            f.write("- " + function.name + "\n")


    def gen_events(self, f, info):
        f.write("\n#### Events\n")
        for event in info.events:
            f.write("- " + event.name + "\n")
            #if event.notice != None: f.write("`Description:`\t" + event.notice + '\n\n')


    def gen_public_members(self, f, info):
        f.write("\n#### Members\n")
        for member in info.public_members:
            f.write("- " + member.name + " : " + member.type + "\n")
            #if member.notice != None: f.write("`Description:`\t" + member.notice + '\n\n')


    def generate_documentation(self, info):
        # The with block closes the file; no explicit f.close() is needed.
        with open(self.output, 'w') as f:
            self.gen_header(f, info)
            self.gen_public_functions(f, info)
            self.gen_events(f, info)
            self.gen_public_members(f, info)
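A hedged usage sketch for the Generator class above; 'Token.sol' and 'Token.md' are made-up paths, and the shape of parse_info is whatever Parser.parse returns:

# Hypothetical driver for the documentation generator.
gen = Generator()
gen.set_output('Token.md')   # markdown file to write
gen.start(['Token.sol'])     # parse the contract and document it

Note that start() reopens self.output in 'w' mode for every contract, so when several contracts are passed only the last one's documentation survives.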
Example #6
def intermediate(code):
    parser = Parser(code)
    block = parser.parse()
    # print(block)
    fd = {'params': {'var': True, 'params': []}, 'block': block}
    info = new_func_info(None, fd)
    info.add_local_var('_ENV')
    cg_func_def_exp(info, fd, 0)
    # print(info.sub_funcs[0].ins)
    return info
Example #7
def set_parser():
    """docstring for set_parser"""
    parse = Parser()
    parse.add_description('Csv plotter')
    parse.parser.add_argument('--csv',
                              required=True,
                              help='Path to the csv file')
    parse.parser.add_argument('--x',
                              required=True,
                              type=int,
                              default=0,
                              help='Csv file column to use for x axis')
    parse.parser.add_argument('--y',
                              required=True,
                              type=int,
                              default=0,
                              help='Csv file column to use for y axis')
    parse.parse()

    return parse
Example #8
def run(s):
    scanner = Scanner(s, error)
    tokens = scanner.scan_tokens()
    parser = Parser(tokens, parse_error)
    statements = parser.parse()
    if hasError:
        return
    if hasRuntimeError:
        return
    interpreter = Interpreter()
    interpreter.interpret(statements)
Example #9
    def test_parser(self):
        cases = [('(1+2)<>3', ['1', '2', '+', '3', '<>']),
                 ('1+2*3', ['1', '2', '3', '*', '+']),
                 ('1 >= 2', ['1', '2', '>=']),
                 ('(1*2+9) = (2*3-6)',
                  ['1', '2', '*', '9', '+', '2', '3', '*', '6', '-', '='])]

        parser = Parser()

        for expr, correct in cases:
            self.assertEqual(parser.parse(expr), correct)
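The Parser under test is not shown; for reference, a minimal shunting-yard sketch that reproduces the expected outputs above (an illustration, not the project's parser):

import re

# Relational operators bind loosest, then +/-, then */.
PREC = {'=': 0, '<>': 0, '>=': 0, '<=': 0, '>': 0, '<': 0,
        '+': 1, '-': 1, '*': 2, '/': 2}

def to_postfix(expr):
    out, ops = [], []
    for tok in re.findall(r'\d+|<>|>=|<=|[-+*/()=<>]', expr):
        if tok.isdigit():
            out.append(tok)
        elif tok == '(':
            ops.append(tok)
        elif tok == ')':
            while ops[-1] != '(':
                out.append(ops.pop())
            ops.pop()   # discard the '('
        else:
            while ops and ops[-1] != '(' and PREC[ops[-1]] >= PREC[tok]:
                out.append(ops.pop())
            ops.append(tok)
    return out + ops[::-1]

assert to_postfix('1+2*3') == ['1', '2', '3', '*', '+']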
Example #10
def preview(id):
    record = db_session.query(Page).get(id)
    if record is not None:
        parser = Parser()
        html = parser.parse(record.data)
        html = html.replace('</body>', '{{ post_body|safe }}</body>')

        last_saved = record.updated.strftime('%B %d, %Y at %I:%M%p')
        post_body = render_template('preview_post_body.html', last_saved=last_saved)
        return render_template_string(html, post_body=post_body)
    return render_template('empty_preview.html')
Example #11
def getReply(message):
    p = Parser()
    parse_tree = p.parse(message)

    translation = list()
    find_best_translation(parse_tree, translation)
    print(message)
    print(translation)
    answer = " ".join(translation)

    # return the formulated answer
    return answer
Example #12
class BaseRoute(Generic[_V]):
    """The base route class.

    This is referenced as `_R` in the rest of this module.

    # Parameters
    pattern (str): a URL pattern.
    view (_V):
        a view function or object whose actual type is defined by concrete
        routes.
    """
    def __init__(self, pattern: str, view: _V):
        if pattern != WILDCARD and not pattern.startswith("/"):
            pattern = f"/{pattern}"
        self._pattern = pattern
        self._parser = Parser(self._pattern)
        self.view = view

    @property
    def pattern(self) -> str:
        return self._pattern

    def url(self, **kwargs) -> str:
        """Return the full URL path for the given route parameters.

        # Parameters
        kwargs (dict): route parameters.

        # Returns
        url (str):
            A full URL path obtained by formatting the route pattern with
            the provided route parameters.
        """
        return self._pattern.format(**kwargs)

    def parse(self, path: str) -> Optional[dict]:
        """Parse an URL path against the route's URL pattern.

        # Returns
        params (dict or None):
            If the URL path matches the URL pattern, this is a dictionary
            containing the route parameters, otherwise it is `None`.
        """
        result = self._parser.parse(path)
        return result.named if result is not None else None

    def _get_clone_kwargs(self) -> dict:
        return {"pattern": self._pattern, "view": self.view}

    def clone(self: _R, **kwargs: Any) -> _R:
        kwargs = {**self._get_clone_kwargs(), **kwargs}
        return type(self)(**kwargs)
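A small usage sketch for BaseRoute; the pattern and view below are illustrative only, and matching follows the `parse` library that backs Parser here:

# Illustrative only; real view types depend on the concrete route class.
route = BaseRoute("items/{pk}", view=lambda req: None)
route.url(pk=42)          # -> "/items/42" (a leading slash is added)
route.parse("/items/42")  # -> {"pk": "42"} when the path matches
route.parse("/other")     # -> None when it does not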
Example #13
def generate_code(path):
    lexer = Lexer().get_lexer()
    pg = Parser()
    pg.parse()
    parser = pg.get_parser()
    with open(path) as fp:
        # enumerate keeps the reported line number in sync with the file.
        for line_no, line in enumerate(fp, start=1):
            try:
                line = line.replace('\n', '')
                if len(line) > 0 and '#' not in line:
                    token = lexer.lex(line)
                    parser.parse(token)
            except Exception:
                return "Syntax error in line No " + str(line_no)
    code = Codegen()
    write_file(code.doit(), path)
    return "Successfully generated"
Example #14
def make_unfiltered_rules_from_corpus():
    parser = Parser()
    parsed_sents = set()
    with open("../corpus.txt", "r") as corpus_file:
        for line in corpus_file.readlines():
            tree = parser.parse(line)
            parsed_sents.add(tree)
    ruleset = set()
    for iterable in parsed_sents:
        for tree in iterable:
            for rule in tree.productions():
                ruleset.add(rule)
    return '\n'.join([str(r) for r in ruleset])
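The tree API used above (leaves and productions) matches nltk.Tree; a hedged illustration of what .productions() yields, assuming that library:

# Assumes nltk; the sentence is a toy example, not from the corpus.
from nltk import Tree

t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
for rule in t.productions():
    print(rule)   # S -> NP VP, NP -> 'I', VP -> V NP, ...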
Example #15
def main(args):
    """Main entry point for Boink. Apply necessary operations
    with the given arguments.

    Args:
        args (Namespace): Arguments supplied by the user.
    """

    if args.command == 'run':
        try:

            with open(args.file, 'r') as file:
                # Extra newline for parsing.
                inp = file.read() + '\n'

        except FileNotFoundError:

            print(Colr(f"File '{args.file}' not found.", fore="red"))
            return

        # Handler for exceptions during lexing, parsing and semantic analysis.
        error_handler = ErrorHandler()

        lexer_ = Lexer(inp, error_handler)

        # Assign lexer to convert positions to line and offset.
        error_handler.lexer = lexer_
        parser_ = Parser(lexer_, error_handler)

        # Root node of the program a.k.a. the program itself.
        # Argument is the program name which is equivalent to file's name.
        root = parser_.parse(os.path.basename(args.file))

        parser_.log_tree(root)

        symbol_tree_builder = SymbolTableBuilder(error_handler)
        symbol_tree_builder.visit(root)

        symbol_tree_builder.log_info()

        if not error_handler.errors:
            interpreter_ = Interpreter(root, error_handler)
        else:
            error_handler.log_all()
    else:
        print(Colr(f"Command {repr(args.command)} doesn't exist", fore="red"))
        return
Example #16
def compile(p):
    # Tokenize the regular expression
    lexer = Lexer(p)
    # Parse the regular expression; the parser emits the tokens in the
    # order the stack-based NFA builder expects
    parser = Parser(lexer)
    tokens = parser.parse()
    # Handles NFA construction
    handler = Handler()

    nfa_stack = []

    for t in tokens:
        handler.handlers[t.name](t, nfa_stack)

    #return an NFA
    return nfa_stack.pop()
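A hedged usage note: compile is expected to leave exactly one NFA on the stack for a well-formed token stream, which is what the assert in the later variants of this function checks. A hypothetical call (the pattern is arbitrary and the NFA's matching API is not shown here):

nfa = compile('(a|b)*c')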
Example #17
def run(fn, text):
    lexer = Lexer(fn, text)
    tokens, error = lexer.make_tokens()
    if error: return None, error

    #Generate Abstract Syntax Tree
    parser = Parser(tokens)
    ast = parser.parse()
    if ast.error: return None, ast.error

    #Run the AST through the Interpreter
    interpreter = Interpreter()
    context = Context('<program>')
    context.symbol_table = global_symbol_table
    result = interpreter.visit(ast.node, context)

    return result.value, result.error
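A hedged REPL wrapper around run(), assuming the tutorial-style convention above where results carry (value, error) pairs:

# '<stdin>' stands in for a real filename.
while True:
    text = input('lang > ')
    value, error = run('<stdin>', text)
    print(error if error else value)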
Example #18
def jawbreaker():

    # filter arguments
    arguments = parse_args()
    filename = arguments.filename
    outname = arguments.outname
    run = arguments.run

    # generate the code
    l = Lexer(filename)
    p = Parser(l)
    code = p.parse()
    
    # write to the file
    with open(outname, 'w') as out:
        out.write(code)

    # run if specified
    if run:
        os.system('python ' + outname)
Example #19
def run(fn, text):
    lexer_ = Lexer(fn, text)
    tokens, error = lexer_.make_tokens()

    if error:
        return '', error
    print(text)
    print(tokens)

    parser = Parser(tokens)
    ast = parser.parse()

    print(ast.node)

    interpreter_ = Interpreter()
    res = interpreter_.visit(ast.node)

    print(res['sites'][0]['url'])

    return res, ''
Example #20
def compile(p, debug=False):
    def print_tokens(tokens):
        for t in tokens:
            print(t)

    lexer = Lexer(p)
    parser = Parser(lexer)
    tokens = parser.parse()

    handler = Handler()

    if debug:
        print_tokens(tokens)

    nfa_stack = []

    for t in tokens:
        handler.handlers[t.name](t, nfa_stack)

    assert len(nfa_stack) == 1
    return nfa_stack.pop()
Example #21
def main():
    parser = argparse.ArgumentParser(
        description="SPI - Simple Pascal Interpreter")
    parser.add_argument("inputfile", help="Pascal source file")
    parser.add_argument(
        "--scope",
        help="Print scope information",
        action="store_true",
    )
    parser.add_argument(
        "--stack",
        help="Print call stack",
        action="store_true",
    )
    args = parser.parse_args()

    global _SHOULD_LOG_SCOPE, _SHOULD_LOG_STACK
    _SHOULD_LOG_SCOPE, _SHOULD_LOG_STACK = args.scope, args.stack

    text = open(args.inputfile, "r").read()
    # print(text)

    lexer = Lexer(text)

    try:
        parser = Parser(lexer)
        tree = parser.parse()
    except (LexerError, ParserError) as e:
        print(e.message)
        sys.exit(1)

    semantic_analyzer = SemanticAnalyzer(_SHOULD_LOG_SCOPE)
    try:
        semantic_analyzer.visit(tree)
    except SemanticError as e:
        print(e.message)
        sys.exit(1)

    interpreter = Interpreter()
    interpreter.interpret(tree)
Example #22
def compile(p, debug=False):

    def print_tokens(tokens):
        for t in tokens:
            print(t)

    lexer = Lexer(p)
    parser = Parser(lexer)
    tokens = parser.parse()

    handler = Handler()

    if debug:
        print_tokens(tokens)

    nfa_stack = []

    for t in tokens:
        handler.handlers[t.name](t, nfa_stack)

    assert len(nfa_stack) == 1
    return nfa_stack.pop()
Example #23
def publish(id):
    record = db_session.query(Page).get(id)
    if record is not None:
        parser = Parser()
        html = parser.parse(record.data)
        if record.slug is not None:
            dir_name = record.slug
        else:
            dir_name = record.key

        if record.access == 'public':
            file_path = '%s%s' % (config['generate']['public_path'], dir_name)
            url = dir_name
        elif record.access == 'limited':
            file_path = '%s%s' % (config['generate']['limited_path'], dir_name)
            url = '%s?key=%s' % (dir_name, record.key)
        else:
            # access is private so do nothing
            return jsonify(published=record.published.strftime('%B %d, %Y at %I:%M%p'))

        if not os.path.exists(file_path):
            os.makedirs(file_path)

        full_path = '%s/index.html' % file_path
        with open(full_path, "w") as fh:
            fh.write(html)

        # set published date
        record.published = datetime.now()
        db_session.add(record)
        db_session.commit()
        return jsonify(
            published_date=record.published.strftime('%B %d, %Y at %I:%M%p'),
            url=url
        )

    return jsonify(error=True, message="Not found")
Example #24
async def solve_cryptarithm(expr: str = '') -> Dict[str, Any]:
    p = Parser(tokenize(expr))
    pr = Problem(p.parse())
    status, solutions = pr.search_all_solution()
    return {"problem": expr, "answer": solutions, "status": status}
Example #25
    tile_height = 6
    row = 1
    col = 8

    left_start = origin + 0 * tile_width
    # top_start = origin + 6 * tile_height
    top_start = origin + 70 + 4

    scenes = []
    tasks = []

    for left in range(left_start, left_start + col * tile_width, tile_width):
        for top in range(top_start, top_start + row * tile_height,
                         tile_height):
            parser = Parser()
            s = parser.parse(testfile)
            scenes.append(s)

            scenes[-1].sampler = TileSampler(left, top, tile_width,
                                             tile_height)
            name = out_dir + '%d.%d.png' % (left, top)
            scenes[-1].film = copy.deepcopy(scenes[-1].film)
            scenes[-1].film.filename = name
            tasks.append(scenes[-1].render)

    p = Pool(len(tasks))
    for s in scenes:
        print(s.film.filename)
        print('%d, %d' % (s.sampler.left, s.sampler.top))

    for task in tasks:
Example #26
    use_stemming = False
    for o, a in opts:
        if o in ("-s", "--stemming"):
            use_stemming = True

    print "use stemming: %d" % use_stemming

    filelist = [(path + f) for f in os.listdir(path)]

    parser = Parser(fstopname)
    for stem in [use_stemming]:
        for idf in True, False:
            print "Parsing files...",
            stdout.flush()
            parser.parse(filelist, stem)
            # Ignore the 10% least and most frequent words
            parser.words = slice_sorted_words(parser.words, 30)
            print "done"

            print "Normalizing frequencies...",
            stdout.flush()
            # Don't modify the original set
            for i, doc in enumerate(parser.docset):
                normalize(doc, parser.words, idf)
                print i
            gc.collect()
            print "done"

            for chooser in choose_initial_pp, choose_initial:
                for k in 10, 20, 30, 40:
Example #27
        dot.edge(p_name, name)
        for n in node.params:
            see_node(name, n, dot)
        see_node(name, node.body, dot)
        for n in node.localvars:
            see_node(name, n, dot)
    elif type(node) == CompoundStmtNode:
        dot.node(name, str(node.kind))
        dot.edge(p_name, name)
        for n in node.stmts:
            see_node(name, n, dot)
    elif type(node) == IfStmtNode:
        dot.node(name, str(node.kind))
        dot.edge(p_name, name)
        see_node(name, node.cond, dot)
        see_node(name, node.then, dot)
        if node.els is not None:
            see_node(name, node.els, dot)

if __name__ == '__main__':
    path = './t1.c'
    l = Lexer(filename(path), read_file(path))
    l.lex()
    l.see_tokens()

    p = Parser(l.tokens)
    p.parse()

    see_ast(p.ast)

Example #28
import pprint

from pascal_loader.main_io import PascalFile
from parse import Parser
from emulator import Emulator

if __name__ == '__main__':
    pretty_printer = pprint.PrettyPrinter()

    # UNCOMMENT the below statements one at a time

    # tokens = get_token(PascalFile(input_file_location='simple_assignment.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='complex_assignments.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='control_repeat.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='control_while.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='control_if.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='control_for.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='case_statement.pas', output_location=''))
    tokens = get_token(PascalFile(input_file_location='arrays.pas', output_location=''))

    # This prints tokens, uncomment to see the generated tokens
    # pretty_printer.pprint(tokens)
    print '----------------------------------'
    # setting verbose=True to parser will print to console as tokens are matched/warnings
    # parser = Parser(token_list=tokens, verbose=True)
    parser = Parser(token_list=tokens)
    byte_array = parser.parse()
    # This prints the byte array, uncomment to see the bytearray
    # pretty_printer.pprint(byte_array)
    print '----------------------------------'
    emulator = Emulator(byte_array)
    emulator.start()
Example #29

precedence = {
    '||': 5,
    '&&': 5,
    '>': 6,
    '>=': 6,
    '<=': 6,
    '<': 6,
    '==': 6,
    '!=': 6,
    '+': 7,
    '-': 7,
    '*': 8,
    '/': 8,
    'UMINUS': 9
}
tokens = list(Lexer('1+-10*123').lex())
precs = {'UMINUS': ['E', '-', 'E'], 'POSITIVE': ['E', '+', 'E']}
# for t in tokens:
#     print(t)
# print(sm.productions())
# parser = Parser(productions, terminal, nonterminal)
parser = Parser(sm.productions,
                sm.terminal,
                sm.nonterminal,
                precedence=precedence,
                precs=precs)
parser.generate()
parser.parse(tokens, sm.sdmap)
print(calls)
Example #30
from parse import Parser

parser = Parser(file="cfg")
parser.parse()
Example #31
from lexer import Lexer
from parse import Parser
from codegen import CodeGen

text_input = """
print(4 + 4 - 2);
"""

lexer = Lexer().get_lexer()
tokens = lexer.lex(text_input)

codegen = CodeGen()

module = codegen.module
builder = codegen.builder
printf = codegen.printf

pg = Parser(module, builder, printf)
pg.parse()
parser = pg.get_parser()
parser.parse(tokens).eval()

codegen.create_ir()
codegen.save_ir("output.ll")
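The pg.parse() then pg.get_parser() two-step looks like rply's ParserGenerator idiom; a hedged sketch of that underlying pattern (rply assumed, grammar illustrative):

from rply import ParserGenerator

pg = ParserGenerator(['NUMBER', 'PLUS'])

@pg.production('expr : NUMBER')
def expr_number(p):
    return int(p[0].getstr())

@pg.production('expr : expr PLUS expr')
def expr_plus(p):
    return p[0] + p[2]

parser = pg.build()   # the snippet's get_parser() wraps this step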
Example #32
# print(sm.terminal)
t2p = {'id': r'[a-zA-Z_]\w*', 'num': r'\d+'}
# lexer = Lexer('node/test3.dm',sm.terminal,t2p)
lexer = Lexer('node/test6.dm', sm.terminal, t2p)
# lexer = Lexer('test2.dm',sm.terminal,t2p)
# print(list(lexer.lex()))
# for t in lexer.lex():
#     print(t)

# parser.generate()
# parser.dumpjson()
parser.loadjson()
# parser.htmlparse('test.html')
tokens = list(lexer.lex())
tree = parser.parse(tokens, sm.sdmap)  #, debug=True)
# typeCheck = TypeCheck(tree)
# typeCheck.init()
# typeCheck.accept()
# inter = Interperter(tree)
# inter.accept()
# Parser.printitems(sm.productions)
# cnt = Counter([p[0] for p in sm.productions])
# print(cnt)
past = AstPrintVisitor(tree)

past.accept()
# gast  = AstTraversalVisitor(tree)
# print(list(gast.accept()))
print(calls)
Example #33
    use_stemming = False
    for o, a in opts:
        if o in ('-s','--stemming'):
            use_stemming = True

    print 'use stemming: %d' % use_stemming

    filelist = [(path + f) for f in os.listdir(path)]

    parser = Parser(fstopname)
    for stem in [use_stemming,]:
        for idf in True, False:
            print 'Parsing files...',
            stdout.flush()
            parser.parse(filelist[:10000], stem)
            # Ignore the 30% least and most frequent words
            parser.words = slice_sorted_words(parser.words, 30)
            print 'done'

            print 'Normalizing frequencies...',
            stdout.flush()
            # Don't modify the original set
            for i, doc in enumerate(parser.docset):
                normalize(doc, parser.words, idf)
                print i
            gc.collect()
            print 'done'

            for chooser in [choose_initial]: #choose_initial_pp, choose_initial:
                for k in [10]:
Example #34
import pprint

from Pascal_Helper_Files.pascal_reader import PascalFile
from parse import Parser  # parser
from emulator import Emulator  # emulator

if __name__ == '__main__':
    pretty_printer = pprint.PrettyPrinter()

    # UNCOMMENT the below statements one at a time

    # tokens = get_token(PascalFile(input_file_location='array_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='assignment_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='for_example.pas', output_location=''))
    # tokens = get_token(PascalFile(input_file_location='if_example.pas', output_location=''))
    tokens = get_token(
        PascalFile(input_file_location='while_example.pas',
                   output_location=''))

    # UNCOMMENT THE LINE BELOW TO TEST THE SCANNER --> YOU WILL SEE THE TOKENS
    # pretty_printer.pprint(tokens)

    print '----------------------------------'

    # UNCOMMENT THE LINE BELOW TO TEST THE PARSER
    # parser = Parser(token_list=tokens, verbose=True)
    parser = Parser(token_list=tokens)
    byte_array = parser.parse()
    # This prints the byte array, uncomment to see the bytearray
    # pretty_printer.pprint(byte_array)
    print '----------------------------------'
    emulator = Emulator(byte_array)
    emulator.start()
Example #35
#		starts at the root of the parse tree and checks if the
#		phrase at each node is in the idiom cache or dictionary
def find_best_translation(parse_tree, translation):

	for root in parse_tree:
		if isinstance(root, str):
			translation.append(root)
			return
		phrase = " ".join(root.leaves())
		class_of = find_class_of(phrase)
		if find_in_cache(phrase) is not None:
			translation.append(__idiom_dict[phrase])
		elif class_of is not None:
			translation.append(find_target_language_translation(class_of, "mandarin"))
		else:
			find_best_translation(root, translation)

	return




if __name__ == '__main__':
	p = Parser()
	parse_tree = p.parse("I am cleaning the bathroom")

	translation = list()
	find_best_translation(parse_tree, translation)

	print(translation)
Example #36
from lex import Lexer
from parse import Parser
from codegen import CodeGen

fname = "input.toy"
with open(fname) as f:
    text_input = f.read()

lexer = Lexer().get_lexer()
tokens = lexer.lex(text_input)

codegen = CodeGen()

module = codegen.module
builder = codegen.builder
printf = codegen.printf

#~ pg = Parser(module, builder, printf)
pg1 = Parser()
pg1.parse()
parser = pg1.get_parser()
parser.parse(tokens)

codegen.create_ir()
#~ codegen.save_ir("output.ll")