Code Example #1
def test_division():
    interpreter = Interpreter('30/2')
    result = interpreter.expr()
    assert result == 15.0
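This test (together with Code Examples #14 and #30 below) pins down a calculator-style Interpreter that takes the source text in its constructor and evaluates it via expr(). A minimal sketch that satisfies all three tests, assuming integer literals and strictly left-to-right evaluation; only the Interpreter(text).expr() shape comes from the tests themselves:

import operator
import re

class Interpreter:
    # Hypothetical minimal implementation -- everything beyond the
    # Interpreter(text).expr() interface is an assumption.
    OPS = {'+': operator.add, '-': operator.sub,
           '*': operator.mul, '/': operator.truediv}

    def __init__(self, text):
        # Split the source into integer literals and operators; whitespace
        # is skipped, which is what test_whitespace checks.
        self.tokens = re.findall(r'\d+|[+\-*/]', text)

    def expr(self):
        # Fold strictly left to right -- enough for the single-operator tests.
        result = float(self.tokens[0])
        for op, num in zip(self.tokens[1::2], self.tokens[2::2]):
            result = self.OPS[op](result, float(num))
        return result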
Code Example #2
File: day09.py  Project: cescara/aoc2019
def part1(program):
    return Interpreter(program, [1]).run()
Code Example #3
	def test_numbers(self):
		value = Interpreter().visit(NumberNode(51.2))
		self.assertEqual(value, Number(51.2))
Code Example #4
 def __init__(self, configJSON):
     self.interpreter = Interpreter(configJSON)
     self.config = self.interpreter.getConfig()
     self.pluginTemplate = MuseUI(self.interpreter)
Code Example #5
 def get_interpreter(self):
     return Interpreter(self.lang_opts)
Code Example #6
def test_interpreter():
    assert Interpreter(Lexer("1 + (2 * 3) + (4 * (5 + 6))")).expr() == 51
    assert Interpreter(Lexer("2 * 3 + (4 * 5)")).expr() == 46
Code Example #7
'''(This time I did a compiler to Python)'''

from lexer import Lexer
from astparser import Parser

text = '''
PROGRAM hello;
VAR
  a, b : INTEGER;
  
BEGIN
 a := 1 + 1; {this is a comment} b := a
END.
'''
if __name__ == '__main__':
    #text = input('parse> ')
    '''
    ll = Lexer('a := 1')
    c = ll.get_next_token()
    while c.type != 'EOF':
        print('Token', c)
        c = ll.get_next_token()
    '''

    l = Lexer(text)
    p = Parser(l)
    from interpreter import Interpreter
    i = Interpreter(p.program())
    print(i.visit())
Code Example #8
from cpu import Cpu
from interpreter import Interpreter
import struct

if __name__ == "__main__":
    src = './code/factorial.asm'
    i = Interpreter()
    i.asm_decoder(src)

    vm = Cpu("./code/factorial.o")
    # print(vm.__get_data_from_memory(0))
    # memory = 4 + 2 * 4
    # pos = 0
    vm.start()
    # vm.mm[0:4] = bytearray(struct.pack('I', 42))
    # print(struct.unpack('I', vm.mm[pos:pos + 4]))
    vm.close()

Code Example #9
File: award_driver.py  Project: pappasc/cs467maia
    def create_pdf(self, data):
        """Build, interpret, and distribute PDF award

        Arguments:
            data:   dictionary. POST request data, plus data derived from the result of that request;
                    keys = award_id, authorizing_user_id, receiving_user_id, awarded_datetime, type
        Returns:
            True if the email was sent successfully (or treated as sent when email_on is False);
            False otherwise. Does NOT return the result of the subsequent PDF handling: if PDF
            creation fails, an admin can still generate and send the award manually -- the user
            isn't blocked from creating the award.
        """
        # Set up instances of helper classes
        builder = Builder(self.connection_data, data['type'])
        interpreter = Interpreter()
        distributer = Distributer(data['award_id'])

        # Build the Award Contents
        logging.info('AwardDriver.create_pdf(): building award contents')
        award_data = builder.query_database_for_data(data)
        modified_award_tex = builder.generate_award_tex(award_data)
        image = builder.query_bucket_for_image(award_data['SignaturePath'])

        # Initialize success/failure flags (None means the step was never attempted)
        pdf, write_successful = (None, None)
        email_successful, deletion_successful, distributed_updated = (False,
                                                                      False,
                                                                      False)

        # Build PDF from TEX + JPG if building award contents was successful
        if image is not None and modified_award_tex is not None:
            logging.info('AwardDriver.create_pdf(): building PDF')
            pdf = interpreter.interpret(award_data['SignaturePath'],
                                        modified_award_tex, image)

        # Send email if we have a PDF generated, or just say we did if email_on is False (for testing)
        if pdf is not None:
            # Technically don't NEED to additionally write to storage bucket, but it allows for
            # us to not lose award PDF if something goes wrong in this function
            # Writing to storage bucket instead of a SQL database because
            #   1) this is transient / temporary data
            #   2) it is not best practice to store files in a SQL database.
            logging.info(
                'AwardDriver.create_pdf(): saving PDF to storage bucket')
            write_successful = interpreter.write_award_to_bucket(
                data['award_id'], pdf)
            if self.email_on is True:
                logging.info('AwardDriver.create_pdf(): distributing email')
                email_successful = distributer.email_receiving_user(
                    pdf, award_data['email_address'], data['type'])
            else:
                email_successful = True

            # Show we sent email in database -- even if we're using no-email
            if email_successful is True:
                logging.info(
                    'AwardDriver.create_pdf(): updating distributed in database'
                )
                distributed_updated = distributer.update_distributed_in_database(
                    self.connection_data)

        # Clean-up PDF from storage bucket
        if email_successful is True and write_successful is True:
            logging.info(
                'AwardDriver.create_pdf(): deleting PDF from storage bucket')
            deletion_successful = distributer.delete_award_from_bucket()

        # Only returns true if email sent
        logging.info(
            'AwardDriver.create_pdf(): returning {}'.format(email_successful))
        return email_successful
Code Example #10
 def test_hello(self):
     sys.stdout = io.StringIO('')
     code = '++++++++++[>+++++++>++++++++++>+++>+<<<<-]>++.>+.+++++++..+++.>++.<<+++++++++++++++.>.+++.------.--------.>+.>.'
     Interpreter(code).run()
     self.assertEqual(sys.stdout.getvalue(), 'Hello World!\n')
Code Example #11
 def test_nested_loop(self):
     sys.stdout = io.StringIO('')
     code = '++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.'
     Interpreter(code).run()
     self.assertEqual(sys.stdout.getvalue(), 'Hello World!\n')
Code Example #12
 def test_A(self):
     sys.stdout = io.StringIO('')
     code = '+++++[>+++++++++++++<-]>.'
     Interpreter(code).run()
     self.assertEqual(sys.stdout.getvalue(), 'A')
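Code Examples #10-#12 above drive a Brainfuck interpreter: each code string is a Brainfuck program, and run() writes its output to sys.stdout, which the tests swap for a StringIO. A minimal sketch of such an Interpreter, assuming a 30,000-cell tape with 8-bit wrapping cells (the tests only fix the Interpreter(code).run() interface):

import sys

class Interpreter:
    def __init__(self, code):
        self.code = code
        # Pre-match brackets so '[' and ']' can jump in O(1).
        self.jumps, stack = {}, []
        for i, ch in enumerate(code):
            if ch == '[':
                stack.append(i)
            elif ch == ']':
                j = stack.pop()
                self.jumps[i], self.jumps[j] = j, i

    def run(self):
        tape, ptr, ip = [0] * 30000, 0, 0
        while ip < len(self.code):
            ch = self.code[ip]
            if ch == '>':   ptr += 1
            elif ch == '<': ptr -= 1
            elif ch == '+': tape[ptr] = (tape[ptr] + 1) % 256
            elif ch == '-': tape[ptr] = (tape[ptr] - 1) % 256
            elif ch == '.': sys.stdout.write(chr(tape[ptr]))  # honors the tests' StringIO swap
            elif ch == '[' and tape[ptr] == 0: ip = self.jumps[ip]
            elif ch == ']' and tape[ptr] != 0: ip = self.jumps[ip]
            ip += 1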
Code Example #13
def interact():
    itp = Interpreter()
    while True:
        line = raw_input(">>")
        line = line.strip()
        itp.interpret(line)
Code Example #14
def test_whitespace():
    interpreter = Interpreter('3 + 5')
    result = interpreter.expr()
    assert result == 8
Code Example #15
 def __init__(self, debug=False):
     self.interpreter = Interpreter()
     self.history = []
     self.debug = debug
Code Example #16
def part2():
    print(Interpreter(read_input(), 5).run())
Code Example #17
from interpreter import Interpreter
from astcompiler import Compiler
from parser_ import Parser
from lexer import Lexer

kod = """
isim = oku("Adın ne? > ")
yaz("Merhaba", isim + "!")
"""

bytecode = Compiler().compile_ast(Parser(Lexer()).parse(kod))

Interpreter().execute_bytecode(bytecode)

print(bytecode)
Code Example #18
def test_part1():
    assert Interpreter([1002, 4, 3, 4, 33], [1]).run() == []
    assert Interpreter([1101, 100, -1, 4, 0], [1]).run() == []
Code Example #19
def init_dialog_manager(max_clients, dialog_manager_port):
    """ Dialog Manager process that listens to messages from the classifier
    :param max_clients: maximum number of clients that can connect to the dialog manager at a time
    :param dialog_manager_port: port the dialogue manager socket is running on
    :return: None
    """
    global socket_conn
    host = socket.gethostname()
    server_socket = socket.socket()
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    server_socket.bind((host, dialog_manager_port))
    server_socket.listen(max_clients)

    USER_MEMORY = OrderedDict()
    USER_TREES = OrderedDict()
    USER_SLOTS = OrderedDict()

    while True:
        try:
            socket_conn, address = server_socket.accept()
            data = socket_conn.recv(1024).decode()

            if data:
                message_data = json.loads(data)
                session_id = message_data['sessionid']
                intent_name = message_data['intent']
                slot_data = message_data['slots']
                action_type = message_data['action_type']
                user_message = message_data['message']

                user_intent_slots = USER_SLOTS.get(session_id)
                if slot_data is not None:
                    if user_intent_slots is None:
                        user_intent_slots = slot_data
                        USER_SLOTS[session_id] = user_intent_slots
                    else:
                        user_intent_slots.extend(slot_data)

                if action_type is not None:
                    user_intent_memory = USER_MEMORY.get(session_id)
                    user_intent_tree = USER_TREES.get(session_id)
                    node_id = user_intent_tree[0]
                    tree = user_intent_tree[1]
                    interpreter = Interpreter(tree, user_message,
                                              user_intent_slots,
                                              user_intent_memory, node_id)
                    response_data = interpreter.interpret()
                else:
                    intent = get_intent(intent_name)
                    lexer = Lexer(intent)
                    parser = Parser(lexer)
                    tree = parser.parse()

                    interpreter = Interpreter(tree,
                                              user_message, user_intent_slots,
                                              OrderedDict(), -1)
                    response_data = interpreter.interpret()

                print('')
                print('Run-time GLOBAL_MEMORY contents:')
                for k, v in sorted(interpreter.GLOBAL_MEMORY.items()):
                    print('%s = %s' % (k, v))
                interpreter = None

                if response_data is not None:
                    response_text = response_data.response_text
                    tree = response_data.tree
                    node_id = response_data.node_id
                    global_memory = response_data.global_memory
                    action_type = response_data.action_type

                    USER_MEMORY[session_id] = global_memory
                    USER_TREES[session_id] = (node_id, tree)

                    message_data = {}
                    message_data['sessionid'] = session_id
                    message_data['response_text'] = response_text
                    message_data['action_type'] = action_type
                    message_data['intent'] = intent_name
                    message = json.dumps(message_data)
                    socket_conn.send(message.encode())

                    if action_type == ACTION_EXIT:
                        del USER_MEMORY[session_id]
                        del USER_TREES[session_id]
                        del USER_SLOTS[session_id]
                else:
                    message_data = {}
                    message_data['sessionid'] = session_id
                    message_data['response_text'] = None
                    message_data['action_type'] = None
                    message_data['intent'] = None
                    message = json.dumps(message_data)
                    socket_conn.send(message.encode())

        except KeyboardInterrupt:
            socket_conn.close()
            break

    # intent = get_intent('PieceIntent')
    # lexer = Lexer(intent)
    # parser = Parser(lexer)
    # tree = parser.parse()
    # #
    # # semantic_analyser = SemanticAnalyser()
    # # try:
    # #     semantic_analyser.visit(tree)
    # # except Exception as e:
    # #     print(e)
    # #
    # interpreter = Interpreter(tree, 'test', USER_SLOTS)
    # response_data = interpreter.interpret()

    # print('')
    # print('Run-time GLOBAL_MEMORY contents:')
    # for k, v in sorted(interpreter.GLOBAL_MEMORY.items()):
    #     print('%s = %s' % (k, v))
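For reference, the JSON protocol the handler above expects can be exercised with a small client. A hedged sketch: the field names come straight from the handler, while the helper name, the intent value ('PieceIntent', borrowed from the commented-out test), and the connect-per-message design are assumptions:

import json
import socket

def send_to_dialog_manager(port, session_id, message):
    # One connection per message, mirroring the accept()-per-loop server above.
    conn = socket.socket()
    conn.connect((socket.gethostname(), port))
    conn.send(json.dumps({
        'sessionid': session_id,
        'intent': 'PieceIntent',   # hypothetical intent name
        'slots': None,
        'action_type': None,
        'message': message,
    }).encode())
    reply = json.loads(conn.recv(1024).decode())
    conn.close()
    return reply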
Code Example #20
def part1():
    print(Interpreter(read_input(), 1).run())
Code Example #21
def run():
    opt = parse_argument()

    if not os.path.isdir(opt.prog_save_path):
        os.makedirs(opt.prog_save_path)
    if not os.path.isdir(opt.imgs_save_path):
        os.makedirs(opt.imgs_save_path)

    print('========= arguments =========')
    for key, val in vars(opt).items():
        print("{:20} {}".format(key, val))
    print('========= arguments =========')

    # data loader
    test_set = ShapeNet3D(opt.data)
    test_loader = DataLoader(
        dataset=test_set,
        batch_size=opt.batch_size,
        shuffle=False,
        num_workers=opt.num_workers,
    )

    # model
    ckpt = torch.load(opt.model)
    model = BlockOuterNet(ckpt['opt'])
    model.load_state_dict(ckpt['model'])
    if opt.is_cuda:
        model = model.cuda()
        cudnn.benchmark = True

    # test the model and evaluate the IoU
    ori_shapes, gen_shapes, pgms, params = test_on_shapenet_data(
        epoch=0, test_loader=test_loader, model=model, opt=opt, gen_shape=True)
    IoU = BatchIoU(ori_shapes, gen_shapes)
    print("Mean IoU: {:.3f}".format(IoU.mean()))

    # execute the generated program to generate the reconstructed shapes
    # for double-check purpose, can be disabled
    num_shapes = gen_shapes.shape[0]
    res = []
    for i in range(num_shapes):
        data = execute_shape_program(pgms[i], params[i])
        res.append(data.reshape((1, 32, 32, 32)))
    res = np.concatenate(res, axis=0)
    IoU_2 = BatchIoU(ori_shapes, res)

    assert abs(IoU.mean() - IoU_2.mean()) < 0.1, 'IoUs are not matched'

    # save results
    save_file = os.path.join(opt.save_path, 'shapes.h5')
    f = h5py.File(save_file, 'w')
    f['data'] = gen_shapes
    f['pgms'] = pgms
    f['params'] = params
    f.close()

    # Interpret the generated programs into human-readable program strings
    if opt.save_prog:
        interpreter = Interpreter(translate, rotate, end)
        num_programs = gen_shapes.shape[0]
        for i in range(min(num_programs, opt.num_render)):
            program = interpreter.interpret(pgms[i], params[i])
            save_file = os.path.join(opt.prog_save_path, '{}.txt'.format(i))
            with open(save_file, 'w') as out:
                out.write(program)

    # Visualization
    if opt.save_img:
        data = gen_shapes.transpose((0, 3, 2, 1))
        data = np.flip(data, axis=2)
        num_shapes = data.shape[0]
        for i in range(min(num_shapes, opt.num_render)):
            voxels = data[i]
            save_name = os.path.join(opt.imgs_save_path, '{}.png'.format(i))
            visualization(voxels,
                          threshold=0.1,
                          save_name=save_name,
                          uniform_size=0.9)
Code Example #22
def vqahelper(app):
    # Lazily create and cache a single Interpreter on the function object;
    # getattr avoids an AttributeError the first time through.
    if not getattr(vqahelper, 'interpreter', None):
        vqahelper.interpreter = Interpreter()

    return vqahelper.interpreter
Code Example #23
    def __init__(self, param):
        np.random.seed(1)
        torch.manual_seed(999)
        #if torch.cuda.is_available(): torch.cuda.manual_seed_all(999)
        self.param = param
        self.run_interpreter = True
        self.run_validation = False
        self.generate_data = False
        if not self.generate_data and os.path.exists(self.param['model_dir'] +
                                                     '/model_data.pkl'):
            self.pickled_train_data = pkl.load(
                open(self.param['model_dir'] + '/model_data.pkl'))
        else:
            self.pickled_train_data = {}
        if 'use_kb_emb' not in self.param:
            self.param['use_kb_emb'] = True
        #self.param['use_kb_emb'] = False
        self.starting_epoch = 0
        self.starting_overall_step_count = 0
        self.starting_validation_reward_overall = 0
        self.starting_validation_reward_topbeam = 0
        if 'dont_look_back_attention' not in self.param:
            self.param['dont_look_back_attention'] = False
        if 'concat_query_npistate' not in self.param:
            self.param['concat_query_npistate'] = False
        if 'query_attention' not in self.param:
            self.param['query_attention'] = False
        if self.param['dont_look_back_attention']:
            self.param['query_attention'] = True
        if 'single_reward_function' not in self.param:
            self.param['single_reward_function'] = False
        if 'terminate_prog' not in self.param:
            self.param['terminate_prog'] = False
            terminate_prog = False
        else:
            terminate_prog = self.param['terminate_prog']
        if 'none_decay' not in self.param:
            self.param['none_decay'] = 0

        if 'train_mode' not in self.param:
            self.param['train_mode'] = 'reinforce'
        self.qtype_wise_batching = self.param['questype_wise_batching']
        self.read_data = ReadBatchData(param)
        if self.param['question_type'] == 'all':
            self.param['question_type'] = ','.join(
                self.read_data.all_questypes_inv.values())
        print "initialized read data"
        if 'relaxed_reward_till_epoch' in self.param:
            relaxed_reward_till_epoch = self.param['relaxed_reward_till_epoch']
        else:
            self.param['relaxed_reward_till_epoch'] = [-1, -1]
            relaxed_reward_till_epoch = [-1, -1]
        if 'params_turn_on_after' not in self.param:
            self.param['params_turn_on_after'] = 'epoch'
        if self.param['params_turn_on_after'] != 'epoch' and self.param[
                'params_turn_on_after'] != 'batch':
            raise Exception('params_turn_on_after should be epoch or batch')
        if 'print' in self.param:
            self.printing = self.param['print']
        else:
            self.param['print'] = False
            self.printing = True
        if 'prune_beam_type_mismatch' not in self.param:
            self.param['prune_beam_type_mismatch'] = 0
        if 'prune_after_epoch_no.' not in self.param:
            self.param['prune_after_epoch_no.'] = [
                self.param['max_epochs'], 1000000
            ]
        if self.param['question_type'] == 'verify':
            boolean_reward_multiplier = 1
        else:
            boolean_reward_multiplier = 0.1
        if 'print_valid_freq' not in self.param:
            self.param['print_valid_freq'] = self.param['print_train_freq']
        if 'valid_freq' not in self.param:
            self.param['valid_freq'] = 100
        if 'unused_var_penalize_after_epoch' not in self.param:
            self.param['unused_var_penalize_after_epoch'] = [
                self.param['max_epochs'], 1000000
            ]
        unused_var_penalize_after_epoch = self.param[
            'unused_var_penalize_after_epoch']
        if 'epoch_for_feasible_program_at_last_step' not in self.param:
            self.param['epoch_for_feasible_program_at_last_step'] = [
                self.param['max_epochs'], 1000000
            ]
        if 'epoch_for_biasing_program_sample_with_target' not in self.param:
            self.param['epoch_for_biasing_program_sample_with_target'] = [
                self.param['max_epochs'], 1000000
            ]
        if 'epoch_for_biasing_program_sample_with_last_variable' not in self.param:
            self.param[
                'epoch_for_biasing_program_sample_with_last_variable'] = [
                    self.param['max_epochs'], 100000
                ]
        if 'use_var_key_as_onehot' not in self.param:
            self.param['use_var_key_as_onehot'] = False
        reward_func = "f1"
        self.param['reward_function'] = "f1"
        if 'relaxed_reward_strict' not in self.param:
            relaxed_reward_strict = False
            self.param['relaxed_reward_strict'] = relaxed_reward_strict
        else:
            relaxed_reward_strict = self.param['relaxed_reward_strict']
        if param['parallel'] == 1:
            raise Exception(
                'Need to fix the intermediate rewards for the parallel interpreter'
            )
        for k, v in param.items():
            print 'PARAM: ', k, ':: ', v
        print 'loaded params '
        self.train_data = []
        if os.path.isdir(param['train_data_file']):
            self.training_files = [
                param['train_data_file'] + '/' + x
                for x in os.listdir(param['train_data_file'])
                if x.endswith('.pkl')
            ]
        elif not isinstance(param['train_data_file'], list):
            self.training_files = [param['train_data_file']]
        else:
            self.training_files = param['train_data_file']
            random.shuffle(self.training_files)
        self.valid_data = []
        if os.path.isdir(param['test_data_file']):
            self.valid_files = [
                param['test_data_file'] + '/' + x
                for x in os.listdir(param['test_data_file'])
                if x.endswith('.pkl')
            ]
        elif not isinstance(param['test_data_file'], list):
            self.valid_files = [param['test_data_file']]
        else:
            self.valid_files = param['test_data_file']
        for file in self.valid_files:
            temp = pkl.load(open(file))
            #temp = self.rectify_ques_type(temp)
            #temp = self.remove_bad_data(temp)
            temp = self.add_data_id(temp)
            self.valid_data.extend(temp)
        if self.qtype_wise_batching:
            self.valid_data_map = self.read_data.get_data_per_questype(
                self.valid_data)
            self.valid_batch_size_types = self.get_batch_size_per_type(
                self.valid_data_map)
            self.n_valid_batches = int(
                math.ceil(
                    float(sum([len(x)
                               for x in self.valid_data_map.values()])) /
                    float(self.param['batch_size'])))
        else:
            self.n_valid_batches = int(
                math.ceil(
                    float(len(self.valid_data)) /
                    float(self.param['batch_size'])))

        if not os.path.exists(param['model_dir']):
            os.mkdir(param['model_dir'])
        self.model_file = os.path.join(param['model_dir'], param['model_file'])
        learning_rate = param['learning_rate']
        start = time.time()
        self.model = NPI(param, self.read_data.none_argtype_index, self.read_data.num_argtypes, \
                         self.read_data.num_progs, self.read_data.max_arguments,
                         self.read_data.wikidata_rel_embed, self.read_data.wikidata_rel_date_embed, \
                         self.read_data.vocab_init_embed, self.read_data.program_to_argtype, \
                         self.read_data.program_to_targettype)
        self.checkpoint_prefix = os.path.join(param['model_dir'],
                                              param['model_file'])
        if os.path.exists(self.checkpoint_prefix):
            self.model.load_state_dict(torch.load(self.checkpoint_prefix))
            fr = open(self.param['model_dir'] + '/metadata.txt').readlines()
            self.starting_epoch = int(fr[0].split(' ')[1].strip())
            self.starting_overall_step_count = int(fr[1].split(' ')[1].strip())
            self.starting_validation_reward_overall = float(
                fr[2].split(' ')[1].strip())
            self.starting_validation_reward_topbeam = float(
                fr[3].split(' ')[1].strip())
            print 'restored model'
        end = time.time()
        if torch.cuda.is_available():
            self.model.cuda()
        self.optimizer = torch.optim.Adam(self.model.parameters(),
                                          lr=learning_rate,
                                          betas=[0.9, 0.999],
                                          weight_decay=1e-5)
        print self.model
        #self.printing = False
        print 'model created in ', (end - start), 'seconds'

        self.interpreter = Interpreter(self.param['freebase_dir'], self.param['num_timesteps'], \
                                       self.read_data.program_type_vocab, self.read_data.argument_type_vocab, False, terminate_prog, relaxed_reward_strict, reward_function = reward_func, boolean_reward_multiplier = boolean_reward_multiplier, relaxed_reward_till_epoch=relaxed_reward_till_epoch, unused_var_penalize_after_epoch=unused_var_penalize_after_epoch)
        if self.param['parallel'] == 1:
            self.InterpreterProxy, self.InterpreterProxyListener = proxy.createProxy(
                self.interpreter)
            self.interpreter.parallel = 1
            self.lock = Lock()
        self.rule_based_logs = json.load(
            open(self.param['freebase_dir'] +
                 '/rulebased_program_jaccard_date_operation.json'))
        self.aggregated_results = {}
        print "initialized interpreter"
Code Example #24
 def test_none(self):
     number = Interpreter().evaluate(None)
     self.assertEqual(number, None)
Code Example #25
File: day09.py  Project: cescara/aoc2019
def test_part1():
    assert Interpreter([109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]).run() == \
           [109, 1, 204, -1, 1001, 100, 1, 100, 1008, 100, 16, 101, 1006, 101, 0, 99]
    assert len(
        str(Interpreter([1102, 34915192, 34915192, 7, 4, 7, 99,
                         0]).run()[0])) == 16
    assert Interpreter([104, 1125899906842624,
                        99]).run()[0] == 1125899906842624
    assert Interpreter([109, -1, 4, 1, 99]).run()[0] == -1
    assert Interpreter([109, -1, 104, 1, 99]).run()[0] == 1
    assert Interpreter([109, -1, 204, 1, 99]).run()[0] == 109
    assert Interpreter([109, 1, 9, 2, 204, -6, 99]).run()[0] == 204
    assert Interpreter([109, 1, 109, 9, 204, -6, 99]).run()[0] == 204
    assert Interpreter([109, 1, 209, -1, 204, -106, 99]).run()[0] == 204
    assert Interpreter([109, 1, 3, 3, 204, 2, 99]).run()[0] == 0
    assert Interpreter([109, 1, 203, 2, 204, 2, 99]).run()[0] == 0
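The asserts above pin down the full Advent of Code 2019 Intcode machine (see also Code Examples #2, #18, and #27): position/immediate/relative parameter modes, a relative base adjusted by opcode 9, and memory that extends past the program. A sketch that passes the asserts above; the internals (defaultdict memory, reading 0 once the input list is exhausted, as the last two asserts require) are assumptions beyond what the tests show:

from collections import defaultdict

class Interpreter:
    def __init__(self, program, inputs=None):
        # Sparse memory: addresses past the program default to 0.
        self.mem = defaultdict(int, enumerate(program))
        self.inputs = list(inputs or [])
        self.ip = 0      # instruction pointer
        self.base = 0    # relative base, adjusted by opcode 9

    def _addr(self, n):
        # Address of the n-th operand, resolved from its parameter mode.
        mode = self.mem[self.ip] // (10 * 10 ** n) % 10
        if mode == 0:                            # position
            return self.mem[self.ip + n]
        if mode == 2:                            # relative
            return self.base + self.mem[self.ip + n]
        return self.ip + n                       # immediate

    def run(self):
        out = []
        while True:
            op = self.mem[self.ip] % 100
            if op == 99:
                return out
            a, b, c = (self._addr(n) for n in (1, 2, 3))
            if op == 1:    # add
                self.mem[c] = self.mem[a] + self.mem[b]; self.ip += 4
            elif op == 2:  # multiply
                self.mem[c] = self.mem[a] * self.mem[b]; self.ip += 4
            elif op == 3:  # input; reads 0 when exhausted, per the last two asserts
                self.mem[a] = self.inputs.pop(0) if self.inputs else 0; self.ip += 2
            elif op == 4:  # output
                out.append(self.mem[a]); self.ip += 2
            elif op == 5:  # jump-if-true
                self.ip = self.mem[b] if self.mem[a] else self.ip + 3
            elif op == 6:  # jump-if-false
                self.ip = self.mem[b] if not self.mem[a] else self.ip + 3
            elif op == 7:  # less than
                self.mem[c] = int(self.mem[a] < self.mem[b]); self.ip += 4
            elif op == 8:  # equals
                self.mem[c] = int(self.mem[a] == self.mem[b]); self.ip += 4
            elif op == 9:  # adjust relative base
                self.base += self.mem[a]; self.ip += 2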
Code Example #26
 def test_numbers(self):
     number = Interpreter().evaluate(IntNode(2))
     self.assertEqual(number, NumberValue(2))
     number = Interpreter().evaluate(FloatNode(2.0))
     self.assertEqual(number, NumberValue(2.0))
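Code Examples #24 and #26 suggest an evaluate() method that maps AST leaf nodes onto runtime value objects and passes None through. A minimal sketch in which the node and value classes are stand-ins; only the names IntNode, FloatNode, NumberValue, and Interpreter.evaluate appear in the tests:

from dataclasses import dataclass

@dataclass
class IntNode:
    value: int

@dataclass
class FloatNode:
    value: float

@dataclass
class NumberValue:
    value: float

class Interpreter:
    def evaluate(self, node):
        # An empty tree evaluates to nothing (test_none).
        if node is None:
            return None
        # Both numeric leaf nodes become NumberValue; dataclass equality
        # makes the assertEqual checks pass.
        return NumberValue(node.value)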
Code Example #27
File: day09.py  Project: cescara/aoc2019
def part2(program):
    return Interpreter(program, [2]).run()
Code Example #28
# The Pickle Programming Language.

from lexer import Lexer
from parse import Parser
from interpreter import Interpreter
from sys import argv
from os import system
from printf import printf
from SymbolTable import global_symbol_table

system("cls")
system("@echo The Pickle Programming Language.")
version = "0.0.1"
system(f"@echo @{version}")

line_number = 0

while True:
    text = input("pickle > ")
    lexer = Lexer(text)
    tokens = lexer.generate_tokens()
    parser = Parser(tokens)
    tree = parser.generate_tree()
    interpreter = Interpreter(global_symbol_table)
    if not tree:
        print(f"{{{line_number}}} => null")
    else:
        print(f"{{{line_number}}} => {interpreter.visit(tree)}")
    line_number += 1
Code Example #29
from interpreter import Interpreter
from lexer import Lexer
from parser_ import Parser

while True:
    try:
        text = input("clac >")
        lexer = Lexer(text)
        tokens = lexer.generate_tokens()
        parser = Parser(tokens)
        tree = parser.parse()

        if not tree: continue

        interpreter = Interpreter()
        value = interpreter.visit(tree)
        print(value)
    except Exception as e:
        print(e)
Code Example #30
def test_addition():
    interpreter = Interpreter('3+4')
    result = interpreter.expr()
    assert result == 7