Example #1
File: main.py  Project: plesner/neutrino
 def schedule_files(self):
   for filename in self.flags.file:
     source = open(filename, "rt").read()
     tokens = token.tokenize(source)
     module = ast.Module(filename)
     parser.Parser(tokens, module).parse_program()
     self.schedule_for_compile(module)
     self.schedule_for_output(module)
Example #2
File: main.py  Project: tundra/neutrino
 def schedule_files(self):
   files = self.flags.files or []
   for filename in files:
     source = open(filename, "rt").read()
     tokens = token.tokenize(source)
     module = ast.Module(filename)
     nparser.Parser(tokens, module).parse_program()
     self.schedule_for_compile(module)
     self.schedule_for_output(module)
Example #3
File: main.py  Project: plesner/neutrino
 def run_parse_input(self, inputs, parse_thunk):
   for expr in inputs:
     tokens = token.tokenize(expr)
     unit = parse_thunk(tokens)
     # Implicitly import the core module into the oldest stage. There needs to
     # be a better model for this, but for now it helps make builtin methods
     # slightly less magic.
     unit.get_oldest_stage().add_import(data.Path(['core']))
     self.schedule_for_compile(unit)
     self.schedule_for_output(unit)
Example #4
File: main.py  Project: tundra/neutrino
 def run_parse_input(self, inputs, parse_thunk):
   for expr in inputs:
     tokens = token.tokenize(expr)
     unit = parse_thunk(tokens)
     # Implicitly import the core module into the oldest stage. There needs to
     # be a better model for this, but for now it helps make builtin methods
     # slightly less magic.
     unit.get_oldest_stage().add_import(data.Path(['core']))
     self.schedule_for_compile(unit)
     self.schedule_for_output(unit)
Example #5
 def answer_query(self, query):
     """Answer a query by dispatching on its token pattern."""
     tokens = token.tokenize(query)
     # "Is ... ?" questions are delegated to the property() helper.
     if tokens[0][1] == 'Is' and tokens[-1][1] == '?':
         return property(tokens[2:-1])
     # Seven-token queries with 'in' near the end go to convert() (presumably a unit conversion).
     if len(tokens) == 7 and tokens[-3][1] == 'in':
         return convert(tokens)
     # Thirteen-token equations with '=' are solved for x, dispatching on '+' vs '*'.
     if len(tokens) == 13 and tokens[-3][1] == '=' and tokens[6][1] == '+':
         return "x = " + str(solveSimple(tokens))
     if len(tokens) == 13 and tokens[-3][1] == '=' and tokens[6][1] == '*':
         return "x = " + str(solveMult(tokens))
     # Anything else falls through to the infix expression evaluator.
     try:
         result = infix_parser.infix_eval(query)
         return result
     except:
         return 85
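
The indexing pattern tokens[i][1] above implies that each token is a (kind, text) pair. Below is a minimal sketch of that assumption, using a plain whitespace splitter as a stand-in for the project-specific token.tokenize; fake_tokenize and the sample query are purely illustrative.

def fake_tokenize(query):
    # Stand-in tokenizer: each token is a (kind, text) pair, which is the
    # shape the dispatch logic above reads with tokens[i][1].
    return [('WORD', part) for part in query.split()]

tokens = fake_tokenize('Is 7 prime ?')
print(tokens[0][1], tokens[-1][1])  # -> Is ?
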
Example #6
if __name__ == '__main__':
    default_examples = [
        '뿡',
        '뿍뿍',
        '뽁뽁',
        '~',
        '뿌직',
        '쀼직',
        '뽀옹',
        '뽀뽀옹',
        '북',
        '부북',
        '부부북',
        '부부부북',
        '=3',
        '==3',
        '빵',
        '빠아앙',
    ]

    for idx, example in enumerate(default_examples):
        print(f'##### Test [{idx}] - {example}')
        result = tokenize(example)
        print(f'-> {result}')

    mix_example = '뿡뿍뿍뽁뽁~뿌직쀼직뽀옹뽀뽀옹북부북부부북부부부북=3==3빵빠아앙'

    print(f'###### Final Test - {mix_example}')
    result = tokenize(mix_example)
    print(f'-> {result}')
Example #7
File: main.py  Project: plesner/neutrino
 def parse_source_file(self, name):
   source = open(name, "rt").read()
   tokens = token.tokenize(source)
   parser.Parser(tokens, self.module).parse_program()
Example #8
File: main.py  Project: plesner/neutrino
 def parse_manifest(self):
   source = open(self.manifest_file, "rt").read()
   tokens = token.tokenize(source)
   return parser.ModuleParser(tokens).parse_module_manifest()
Example #9
def main(input_file: str, debug: bool = False):
    with open(input_file, 'r') as bang_gwi_file:
        code = bang_gwi_file.read()
        thread = Interpreter(20, debug)
        for op in tokenize(code):
            thread.run(op)
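
For context, a minimal sketch of how this entry point might be wired to the command line; the argparse wrapper and the --debug flag name are assumptions for illustration, not part of the original snippet.

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper around main() above; argument names are assumed.
    arg_parser = argparse.ArgumentParser(description='Run a source file through the interpreter.')
    arg_parser.add_argument('input_file', help='path to the source file to execute')
    arg_parser.add_argument('--debug', action='store_true', help='enable interpreter debug output')
    args = arg_parser.parse_args()
    main(args.input_file, debug=args.debug)
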
Example #10
File: main.py  Project: tundra/neutrino
 def parse_source_file(self, name):
   source = open(name, "rt").read()
   tokens = token.tokenize(source)
   nparser.Parser(tokens, self.module, name).parse_program()
Example #11
File: main.py  Project: tundra/neutrino
 def parse_manifest(self):
   source = open(self.manifest_file, "rt").read()
   tokens = token.tokenize(source)
   return nparser.ModuleParser(tokens).parse_module_manifest()