Example #1
    def __init__(self,
                 actions,
                 network_input_shape,
                 replay_memory_size=1024,
                 minibatch_size=32,
                 learning_rate=0.00025,
                 discount_factor=0.9,
                 dropout_prob=0.1,
                 epsilon=1,
                 epsilon_decrease_rate=0.99,
                 min_epsilon=0.1,
                 load_path=None,
                 logger=None):

        # Parameters
        self.network_input_shape = network_input_shape  # Shape of the DQN input
        self.actions = actions  # Size of the discrete action space
        self.learning_rate = learning_rate  # Learning rate for the DQN
        self.dropout_prob = dropout_prob  # Dropout probability of the DQN
        self.load_path = load_path  # Path from which to load the DQN's weights
        self.replay_memory_size = replay_memory_size  # Size of replay memory
        self.minibatch_size = minibatch_size  # Size of a DQN minibatch
        self.discount_factor = discount_factor  # Discount factor of the MDP
        self.epsilon = epsilon  # Probability of taking a random action
        self.epsilon_decrease_rate = epsilon_decrease_rate  # See update_epsilon
        self.min_epsilon = min_epsilon  # Minimum value for epsilon
        self.logger = logger

        # Replay memory
        self.max_loss_memory = Memory(capacity=self.replay_memory_size)
        self.training_count = 0

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        session = tf.Session(config=config)

        KTF.set_session(session)

        # Instantiate the deep Q-networks
        # Main DQN
        self.DQN = DQNetwork(self.actions,
                             self.network_input_shape,
                             learning_rate=self.learning_rate,
                             discount_factor=self.discount_factor,
                             minibatch_size=self.minibatch_size,
                             dropout_prob=self.dropout_prob,
                             load_path=self.load_path,
                             logger=self.logger)

        # Target DQN used to generate targets
        self.DQN_target = DQNetwork(self.actions,
                                    self.network_input_shape,
                                    learning_rate=self.learning_rate,
                                    discount_factor=self.discount_factor,
                                    minibatch_size=self.minibatch_size,
                                    dropout_prob=self.dropout_prob,
                                    load_path=self.load_path,
                                    logger=self.logger)
        # Reset target DQN
        self.DQN_target.model.set_weights(self.DQN.model.get_weights())
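
The epsilon_decrease_rate parameter refers to an update_epsilon method that is not shown here. A minimal sketch of what it plausibly does, assuming multiplicative decay clamped at min_epsilon (a hypothetical reconstruction, not the project's actual code):

    def update_epsilon(self):
        # Hypothetical: decay epsilon multiplicatively after each training
        # step, never letting it fall below min_epsilon.
        self.epsilon = max(self.min_epsilon,
                           self.epsilon * self.epsilon_decrease_rate)
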
Example #2
 def __init__(self, coef_memory=0.1, dropout_seq=0.9):
     super(KAST, self).__init__()
     self.kernel = 13
     self.dropout_seq = dropout_seq
     self.transformation = Transformation(trainable=False)
     self.resnet = ResNet()
     self.rkn = RKNModel()
     self.memory = Memory(unit=200, kernel=self.kernel)
     self.corr_cost = tfa.layers.CorrelationCost(
         kernel_size=1,
         max_displacement=self.kernel // 2,
         stride_1=1,
         stride_2=1,
         pad=self.kernel // 2,
         data_format="channels_last")
     self.corr_cost_stride = tfa.layers.CorrelationCost(
         kernel_size=1,
         max_displacement=(self.kernel // 2) * 2,
         stride_1=1,
         stride_2=2,
         pad=(self.kernel // 2) * 2,
         data_format="channels_last")
     #self.memory = tf.keras.Sequential()
     #self.memory.add(tf.keras.layers.Input(input_shape=((None, None, 256)), batch_input_shape=[4]))
     #self.memory.add(tf.keras.layers.RNN(self.memory_cell, stateful=True))
     self.coef_memory = coef_memory
     self.description = 'KAST'
     self.mem_write = True
     self.mem0 = None
     self.mem5 = None
     self.k0 = None
     self.v0 = None
     self.last_v = None
Example #3
 def performERA(self, quad):
     # Instantiate the memory for the next context and append it to the call stack
     next_local_mem = Memory(self.func_table[quad.op1].address_dir.local)
     next_temp_mem = Memory(self.func_table[quad.op1].address_dir.temp)
     next_context = quad.op1
     self.call_stack.append(
         Cache(None, next_context, next_local_mem, next_temp_mem))
Example #4
    def __init__(self, env, config, demo_transitions=None):
        self.sess = tf.InteractiveSession()
        self.config = config
        demo_transitions = demo_transitions or []  # guard against the None default
        # replay_memory stores both demo data and generated data; demo_memory stores only demo data
        self.replay_memory = Memory(capacity=self.config.replay_buffer_size, permanent_data=len(demo_transitions))
        self.demo_memory = Memory(capacity=self.config.demo_buffer_size, permanent_data=self.config.demo_buffer_size)
        self.add_demo_to_memory(demo_transitions=demo_transitions)  # add demo data to both demo_memory & replay_memory
        self.time_step = 0
        self.epsilon = self.config.INITIAL_EPSILON

        self.state_dim = 735
        self.action_dim = env.action_space.n

        self.action_batch = tf.placeholder("int32", [None])
        self.y_input = tf.placeholder("float", [None, self.action_dim])
        self.ISWeights = tf.placeholder("float", [None, 1])
        self.n_step_y_input = tf.placeholder("float", [None, self.action_dim])  # for n-step reward
        self.isdemo = tf.placeholder("float", [None])
        self.eval_input = tf.placeholder("float", [None, self.state_dim])
        self.select_input = tf.placeholder("float", [None, self.state_dim])

        # Touching these (presumably memoized lazy) properties builds the
        # corresponding graph ops before the variables are initialized.
        self.Q_evaluation
        self.Q_selection

        self.loss
        self.optimize
        self.update_target_net
        self.abs_errors

        self.saver = tf.train.Saver()

        self.sess.run(tf.global_variables_initializer())

        self.save_model()
        self.restore_model()
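
The add_demo_to_memory call above is not defined in this snippet. A minimal sketch consistent with how it is used, assuming Memory exposes a store(transition) method (the method name and loop are assumptions):

    def add_demo_to_memory(self, demo_transitions):
        # Hypothetical: push every demonstration transition into both
        # buffers; permanent_data marks them as never evicted.
        for transition in demo_transitions:
            self.replay_memory.store(transition)
            self.demo_memory.store(transition)
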
Example #5
 def run():
     cpu = Cpu()
     cpu.updateDatabase()
     mem = Memory()
     mem.updateDatabase()
     proc = Processes()
     proc.updateDatabase()
Example #6
 def rd():
     print("reading")
     #
     CU.ui.mdr_ld_start()
     CU.ui.mdr_ld_update(Memory.read_byte(MAR.data))
     #
     MDR.load(Memory.read_byte(MAR.data))
Example #7
 def __init__(self):
     # main memory object
     self.memory = Memory()
     # program counter
     self.pc = 0
     # accumulator
     self.accumulator = 0
Example #8
    def __init__(self, actions, gamma=0.1, e_greedy=0.9):
        state_size = 1
        neurons = 24

        self.actions = actions
        self.gamma = gamma
        self.epsilon = e_greedy
        self.lr = 0.1
        self.count = 0
        self.epochs = 5

        self.v_max = 10
        self.v_min = -10
        self.atoms = 51
        self.delta_z = (self.v_max - self.v_min) / (self.atoms - 1)
        self.z = [self.v_min + i * self.delta_z for i in range(self.atoms)]

        self.m = Build_Model(state_size,
                             neurons,
                             len(actions),
                             atoms=self.atoms)
        self.model = self.m.model
        self.dump_model = copy.copy(self.model)  # note: a shallow copy still shares the underlying layers/weights

        self.capacity = 300
        self.memory = Memory(self.capacity)
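
For reference, with v_min=-10, v_max=10 and 51 atoms, the support spacing delta_z works out to 0.4 and z spans [-10.0, -9.6, ..., 10.0], the standard C51-style discretization. A quick self-contained check:

    # Quick check of the distributional-RL (C51-style) support.
    v_min, v_max, atoms = -10, 10, 51
    delta_z = (v_max - v_min) / (atoms - 1)          # 0.4
    z = [v_min + i * delta_z for i in range(atoms)]
    assert delta_z == 0.4 and len(z) == 51
    assert z[0] == -10 and abs(z[-1] - 10) < 1e-9
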
Example #9
 def __init__(self, shape=(84, 84), num_actions=4):
     self.shape = (shape[0], shape[1], 1)
     self.num_actions = num_actions
     self.main_q_net = self._build_network("main")
     self.trgt_q_net = self._build_network('target')
     self.memory = Memory(MAX_MEMORY)
     self.run_counter = 0
Example #10
 def __init__(self, num_states=4, num_actions=2):
     self.num_states = num_states
     self.num_actions = num_actions
     self.main_q_net = self._build_network("main")
     self.trgt_q_net = self._build_network('target')
     self.memory = Memory(MAX_MEMORY)
     self.run_counter = 0
Example #11
def assemble(file_name):
    symbols = []
    memory = Memory()
    instructions = Instructions()
    sap1_parser = Parser()

    print("Assemble {}".format(file_name))
    segments = sap1_parser.parse_file(file_name)

    if segments == []:
        print("ERROR: No code found in source file")
        exit(-2)

    # Extract all the labels from the segments to create a symbol table
    for segment in segments:
        for label in segment.labels:
            symbols.append(label)

    for segment in segments:
        segment.assemble(symbols, instructions)

    code_segment = None
    for segment in segments:
        if segment.is_code():
            code_segment = segment
        memory = segment.load_memory(memory)

    memory.dump(symbols, code_segment)
Example #12
class CartPoleAgent():
    def __init__(self, state_size, action_size):
        self.state_size = state_size
        self.action_size = action_size
        # discount rate
        self.gamma = 0.95
        # exploration rate
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995

        self.memory = Memory(2000)
        self.DQN = DQN(state_size, action_size)

    def act(self, state):
        if np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)
        act_values = self.DQN.predict(state)
        return np.argmax(act_values[0])  # returns action

    def remember(self, state, action, reward, next_state, done):
        self.memory.add((state, action, reward, next_state, done))

    def replay(self, batch_size):
        minibatch = self.memory.sample(batch_size)
        for state, action, reward, next_state, done in minibatch:
            target = reward
            if not done:
                target = reward + self.gamma * np.amax(
                    self.DQN.predict(next_state)[0])
            target_f = self.DQN.predict(state)
            target_f[0][action] = target
            self.DQN.train(state, target_f, epochs=1)
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
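
A typical way to drive this agent, sketched under the assumption of a standard Gym CartPole environment and the DQN/Memory classes used above (the wiring is illustrative, not from the original project):

    import gym
    import numpy as np

    env = gym.make('CartPole-v1')
    agent = CartPoleAgent(env.observation_space.shape[0], env.action_space.n)
    for episode in range(500):
        state = np.reshape(env.reset(), [1, agent.state_size])
        done = False
        while not done:
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            next_state = np.reshape(next_state, [1, agent.state_size])
            agent.remember(state, action, reward, next_state, done)
            state = next_state
        if episode > 32:  # let the replay buffer fill up before sampling
            agent.replay(batch_size=32)
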
Example #13
    def __init__(self, screen, program=None, id=0):

        self.commands = {
            1: self.addition,
            2: self.multiplication,
            3: self.wait_for_input,
            4: self.output,
            5: self.jump_if_true,
            6: self.jump_if_false,
            7: self.less_than,
            8: self.equals,
            9: self.adjust_base,
            99: self.finalize
        }
        self.screen = screen
        self.id = id
        self.pc = 0
        self.p_mem = Memory()
        self.input_address = None
        self.output_value = [None, None, None]
        self.output_in_progress = 0
        self.relative_base = 0
        if program:
            self.store_mem = Memory(program)
            self.reset()
Example #14
    def __init__(self, state_size, action_size, seed):
        """Initialize an Agent object.
        
        Params
        ======
            state_size (int): dimension of each state
            action_size (int): dimension of each action
            seed (int): random seed
        """
        self.state_size = state_size
        self.action_size = action_size
        self.seed = random.seed(seed)

        # Q-Network
        self.qnetwork_local = QNetwork(state_size, action_size,
                                       seed).to(device)
        self.qnetwork_target = QNetwork(state_size, action_size,
                                        seed).to(device)
        self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)

        # Replay memory
        self.memory = Memory(BUFFER_SIZE)
        self.experience = namedtuple(
            "Experience",
            field_names=["state", "action", "reward", "next_state", "done"])

        # Initialize time step (for updating every UPDATE_EVERY steps)
        self.t_step = 0

        # Here we'll deal with the empty memory problem: we pre-populate our memory
        # by taking random actions and storing the experience.
        self.tree_idx = None
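
The pre-population step described in the comment is not shown. A minimal sketch of it, assuming a Gym-style env and the memory.add(experience) call seen elsewhere in this listing (both are assumptions):

    def prepopulate_memory(self, env, n=1000):
        # Hypothetical: act randomly and store the transitions so the
        # buffer is never empty when learning starts.
        state = env.reset()
        for _ in range(n):
            action = random.randrange(self.action_size)
            next_state, reward, done, _ = env.step(action)
            self.memory.add((state, action, reward, next_state, done))
            state = env.reset() if done else next_state
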
Example #15
 def execute(self):
     i = self.var.evaluate()
     e = self.end.evaluate()
     while i <= e:
         Memory.store(self.var.getChar(), i)
         self.blk.execute()
         i = i + 1
Example #16
    def __init__(self, filename=''):
        self.filename = filename   
        
        screenShape = QtGui.QDesktopWidget().screenGeometry()
        self.width = screenShape.width()/3 + 100
        self.height = screenShape.height() - 100
        
        #Create the activities before creating the caregivers
        self.activities_list = ActivitiesList(account=self)
        
        #Store memory objects
        self.memories = [] 
        self.memories2 = []
        #create a bunch of fake data
        people = ['Bill', 'Frank', 'Jess', 'Penelope', 'Faith', 'Kale', 'JJ']
        for i in range(10):
            a = 'Event Title ' + str(i)
            b = QtCore.QDate.currentDate()
            c = 'Description ' + str(i) + ': Generic description of the event, add more details, more details, descriptions, more descriptions, it was fun'
            d = [people[random.randint(0, 6)], people[random.randint(0, 6)], people[random.randint(0, 6)]]
            e = 'stockphoto' + str(i) + '.png'
            l = 'Location ' + str(i)
            self.memories.append(Memory(title=a, date=b, loc=l, descr=c, tags=d, pic_filename=e))
            self.memories2.append(Memory(title=a, date=b, loc=l, descr=c, tags=d, pic_filename=e))
            self.memories[-1].resize_frame(width=self.width, height=3*self.height/6)
            self.memories2[-1].resize_frame(width=self.width, height=3*self.height/6)
        self.memory_browse = mywidgets.MemoryBrowse(elements=self.memories, tags=self.get_tags(), locs=self.get_locations(), account=self)
        self.memory_browse_patient = mywidgets.MemoryBrowse(elements=self.memories2, tags=self.get_tags(), locs=self.get_locations(), account=self, small=True)
        
        #populate with fake data -- eventually either read in data or start from scratch
        self.caregivers = []
        self.caregivers.append(Caregiver(name='Diana', availability=[0, 1, 0, 0, 0, 1, 0], account=self))
        self.caregivers.append(Caregiver(name='Caregiver 1', availability=[0, 0, 1, 0, 0, 0, 0], account=self))
        self.caregivers.append(Caregiver(name='Caregiver 2', availability=[0, 0, 0, 1, 0, 0, 0], account=self))
        self.caregivers.append(Caregiver(name='Caregiver 3', availability=[0, 0, 0, 0, 1, 0, 0], account=self))

        #create a list of colors corresponding to each caregiver
        self.colors = []
        for caregiver in self.caregivers:
            self.colors.append(QtGui.QColor(random.randint(0,255), random.randint(0,255), random.randint(0,255), 150))
            caregiver.browseClicked.connect(self.open_browse_memories)
            caregiver.availabilityChanged.connect(self.update_calendar)
        
        #create the screens
        self.create_caregiver_screen()
        
        #stack for regular caregiver screen, browse memories
        self.cw = QtGui.QStackedLayout() 
        self.cw.addWidget(self.cs)
        self.cw.addWidget(self.memory_browse)
        
        self.caregiver_screen = mywidgets.BaseFrame(width=self.width, height=self.height+100)    
        self.caregiver_screen.grid.addLayout(self.cw, 0, 0)
        
        #suggest an activity
        self.caregivers[self.current_caregiver()].suggest_activity(self.activities_list.get_activity())
        
        #create the patient
        self.patient = Patient(width=self.width, height=self.height, account=self, memory_browse=self.memory_browse_patient)
Example #17
 def __init__(self, conn):
     super(CPU, self).__init__()
     self.conn = conn
     self.pid_count = 0
     self.count = 0
     Memory.set_up()
     self.scheduler = Scheduler()
     self.current_process = None
Example #18
    def add_memory(self, title='', date=None, loc='', descr='', tags=None, pic_filename=''):
        self.memories.append(Memory(title=title, date=date, loc=loc, descr=descr, tags=tags, pic_filename=pic_filename))      
        self.memories[-1].resize_frame(width=self.width, height=3*self.height/5)      
        self.memory_browse.add_element(self.memories[-1], tags=self.get_tags(), locs=self.get_locations())

        self.memories2.append(Memory(title=title, date=date, loc=loc, descr=descr, tags=tags, pic_filename=pic_filename))         
        self.memories2[-1].resize_frame(width=self.width, height=3*self.height/5)          
        self.memory_browse_patient.add_element(self.memories2[-1], tags=self.get_tags(), locs=self.get_locations())
Example #19
    def __init__(self, asm_filename, obj_filename="../test.o"):
        self.memory = Memory()
        self.registers = Registers()
        #        self.executer = Executioner()
        self.assembler = Assembler()

        self.asm_filename = asm_filename
        self.obj_filename = obj_filename
Example #20
    def __init__(self, bTrain):

        # Settings
        self.directory = '/tmp/TrainedQNetwork'
        self.num_actions = 9
        self.im_height = 84
        self.im_width = 84
        self.discount_factor = 0.99
        self.minibatch_size = 32
        self.initial_epsilon = 1.0
        self.final_epsilon = 0.1
        self.epsilon_frames = 1000000
        self.replay_start_size = 50000
        self.policy_start_size = self.replay_start_size
        self.k = 4  # action repeat (frame skipping)
        self.u = 4  # update frequency
        self.m = 4  # number of frames to include in sequence
        self.c = 10000  # number of actions selected before updating the network used to generate the targets

        # Internal Variables
        self.bTrain = bTrain
        self.ki = 0
        self.ui = 0
        self.mi = 0
        self.frame = 0
        self.ci = 0
        self.sequence = []
        self.prev_phi = np.array([])
        self.phi = np.array([])
        self.epsilon_increment = (self.initial_epsilon -
                                  self.final_epsilon) / self.epsilon_frames
        self.epsilon = self.initial_epsilon
        self.action = 0
        self.reward = 0
        self.memory = Memory()
        self.minibatch = MiniBatch()
        self.targets = np.zeros(self.minibatch_size)
        self.bTrial_over = False
        self.bStartLearning = False
        self.bStartPolicy = False
        self.ti = 0

        random.seed(0)

        # Construct tensorflow graphs
        self.q_graph = QGraph(self.im_width, self.im_height, self.m,
                              self.num_actions, self.directory)

        if (self.bTrain):
            self.q_graph.SaveGraphAndVariables()
            self.q_graph_targets = QTargetGraph(self.im_width, self.im_height,
                                                self.m, self.num_actions,
                                                self.directory)
        else:
            self.q_graph.LoadGraphAndVariables()

        return
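
The per-frame annealing step that consumes epsilon_increment is not shown; given these settings it presumably walks epsilon linearly from 1.0 down to 0.1 over one million frames, along the lines of this hypothetical one-liner:

        # Hypothetical: linear epsilon annealing, applied once per frame.
        self.epsilon = max(self.final_epsilon,
                           self.epsilon - self.epsilon_increment)
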
Example #21
 def __init__(self,
              environment,
              learningRateVar,
              dynamicAlphaVar,
              discountVar,
              nStepVar,
              nPlanVar,
              onPolicyVar,
              updateByExpectationVar,
              behaviorEpsilonVar,
              behaviorEpsilonDecayRateVar,
              targetEpsilonVar,
              targetEpsilonDecayRateVar,
              initialActionvalueMean=0,
              initialActionvalueSigma=0,
              predefinedAlgorithm=None,
              actionPlan=None):
     self.environment = environment
     if predefinedAlgorithm:
         # TODO: set missing params accordingly
         pass
     self.learningRateVar = learningRateVar
     self.dynamicAlphaVar = dynamicAlphaVar
     self.discountVar = discountVar
     self.behaviorPolicy = EpsilonGreedyPolicy(self, behaviorEpsilonVar,
                                               behaviorEpsilonDecayRateVar)
     self.targetPolicy = EpsilonGreedyPolicy(self, targetEpsilonVar,
                                             targetEpsilonDecayRateVar)
     self.onPolicyVar = onPolicyVar
     self.updateByExpectationVar = updateByExpectationVar
     self.nStepVar = nStepVar
     self.nPlanVar = nPlanVar
     self.initialActionvalueMean = initialActionvalueMean  # TODO: Set this in GUI
     self.initialActionvalueSigma = initialActionvalueSigma  # TODO: Set this in GUI
     self.Qvalues = np.empty_like(self.environment.get_grid())
     self.greedyActions = np.empty_like(self.environment.get_grid())
     self.initialize_Qvalues()
     self.stateActionPairCounts = np.empty_like(self.environment.get_grid())
     self.initialize_stateActionPairCounts()
     # Strictly speaking, the agent has no model at all and initially knows nothing about the environment, including its shape.
     # To avoid implementation details that would not change the agent's behavior,
     # the agent is told that the states can be arranged in a matrix of the same shape as the environment,
     # and that the action space is the same for every state.
     self.episodicTask = None  # TODO: not used so far
     self.state = None
     self.episodeFinished = False
     self.return_ = None  # underscore to avoid naming conflict with return keyword
     self.episodeReturns = []
     self.memory = Memory(self)
     self.hasChosenExploratoryMove = None
     self.hasMadeExploratoryMove = None
     self.targetAction = None
     self.targetActionvalue = None
     self.iSuccessivePlannings = None
     # Debug variables:
     self.actionPlan = actionPlan if actionPlan is not None else []  # avoid a shared mutable default
     self.actionHistory = []
Example #22
 def test_copy(self):
     a = Memory(range(0, 10))
     b = a.copy()
     print(type(b))
     a[3] = 77
     self.assertEqual(b[3], 3, "Failed copy")
     a[99] = 77
     self.assertEqual(len(b), 10, "Failed copy - invalid length")
     self.assertEqual(b[99], 0, "Failed copy")
Example #23
    def __init__(self, state_count, action_count):
        self.state_count = state_count
        self.action_count = action_count

        self.brain = Brain(state_count, action_count)
        self.memory = Memory(MEMORY_CAPACITY)

        self.epsilon = MAX_EPSILON
        self.steps = 0
Example #24
 def __init__(self, config):
     self.config = config
     self.epsilon = self.config.explore_start
     print("start of epsilon is ", self.epsilon)
     self.brain = MModel(self.config.action_size, self.config.state_size[0],
                         self.config.state_size[1],
                         self.config.state_size[2])
     self.memory = Memory(self.config.memory_size)
     self.num_actions = self.config.action_size
     self.decayStep = 0
Example #25
def constructDMem():
	memory = [Bus(64) for i in range(5)]
	memory[0] = Bus(0, list(map(int, HexToBin('0x0000000000000001'))))
	memory[1] = Bus(0, list(map(int, HexToBin('0x000000000000000A'))))
	memory[2] = Bus(0, list(map(int, HexToBin('0x0000000000000005'))))
	memory[3] = Bus(0, list(map(int, HexToBin('0x123456789ABCEDFA'))))

	output = Memory(False)
	output.mem = memory
	return output
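
HexToBin is not defined in this snippet. A minimal helper consistent with its use here, assuming it maps a '0x...' string to a zero-padded 64-character binary string:

    def HexToBin(hex_str):
        # Hypothetical helper: '0x...' -> zero-padded 64-bit binary string.
        return bin(int(hex_str, 16))[2:].zfill(64)
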
Example #26
 def __init__(self):
     self.memory = Memory()
     # list of operations stored as list
     self.lines = {}
     # list of actual operation objects that has been executed before
     self.operations = {}
     # list of output lines
     self.output = []
     # instance of operation factory to create operations based on lines of operations
     self.op_maker = OperationFactory(self.memory, self.operations,
                                      self.lines)
Example #27
 def __init__(self, stack_size, max_memory_size, model_save_path):
     self.env = gym.make('BreakoutDeterministic-v4')
     self.memory = Memory(max_memory_size)
     self.frame_stack_size = stack_size
     self.explore_prob = .08
     self.explore_prob_final = 0.01
     self.explore_decay = .995
     self.DQN = DQNModel(self.env.action_space.n, stack_size)
     self.num_exps = 0
     self.model_save_path = model_save_path
     self.discount = .99
Example #28
    def __init__(self):
        self.ip = 0
        self.stack = IntegerStack()
        self.address = IntegerStack()
        self.memory = Memory("ngaImage", InitialImage, 1000000)
        self.clock = Clock()
        self.rng = RNG()
        self.files = FileSystem()
        self.floats = FloatStack()
        self.afloats = FloatStack()
        self.decimals = DecimalStack()
        self.adecimals = DecimalStack()

        self.Dictionary = self.populate_dictionary()
        self.Cached = self.cache_words()

        try:
            self.ui = UI('RETRO', 10, 10)
        except Exception:  # the UI is optional; continue headless if it fails
            pass

        self.setup_devices()
        self.instructions = [
            self.i_nop,
            self.i_lit,
            self.i_dup,
            self.i_drop,
            self.i_swap,
            self.i_push,
            self.i_pop,
            self.i_jump,
            self.i_call,
            self.i_ccall,
            self.i_return,
            self.i_eq,
            self.i_neq,
            self.i_lt,
            self.i_gt,
            self.i_fetch,
            self.i_store,
            self.i_add,
            self.i_subtract,
            self.i_multiply,
            self.i_divmod,
            self.i_and,
            self.i_or,
            self.i_xor,
            self.i_shift,
            self.i_zreturn,
            self.i_halt,
            self.i_ienumerate,
            self.i_iquery,
            self.i_iinvoke,
        ]
Example #29
    def __init__(
        self,
        n_actions,  # number of actions
        n_features,  # number of observations per state
        learning_rate=0.005,
        reward_decay=0.9,  # gamma, the reward decay
        e_greedy=0.9,  # greedy threshold deciding between greedy and random action selection
        replace_target_iter=500,  # how often to update the target net
        memory_size=10000,  # capacity of the replay memory
        batch_size=32,
        e_greedy_increment=None,
        output_graph=False,
        prioritized=True,  # whether to use prioritized replay
        sess=None,
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.target_net_update_period = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        self.prioritized = prioritized  # whether prioritized replay is used

        self.global_step_counter = 0

        self.build_net()

        t_params = tf.get_collection('target_net_params')
        q_params = tf.get_collection('q_net_params')
        self.update_target_net = [
            tf.assign(t, e) for t, e in zip(t_params, q_params)
        ]

        if self.prioritized:  # use a SumTree-backed buffer
            self.memory = Memory(
                capacity=memory_size)  # build a replay memory of the given capacity
        else:  # without prioritized replay, represent the memory as a plain numpy array
            self.memory = np.zeros((self.memory_size, n_features * 2 + 2))

        if sess is None:
            self.sess = tf.Session()
            self.sess.run(tf.global_variables_initializer())
        else:
            self.sess = sess

        if output_graph:
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.cost_his = []
Example #31
 def test_single_instr(self):
     prog = "I ADDI R1 R1 8"
     memory = Memory()
     memory.loadProgramDebugFromText(prog)
     processor = Processor(memory, 0)
     disablePrint()
     processor.start()
     enablePrint()
     cpi = processor.getCPI()
     r1_content = processor.register_file[1]
     self.assertEqual(cpi, 5)
     self.assertEqual(r1_content, 8)
Example #33
    def test_load_program(self):
        """Check whether the conversions of instructions into binary
        and back to hex work properly. """
        filename = "./Input_hex_fibonacci.txt"
        program_file = open(filename)
        lines = program_file.readlines()
        program_file.close()
        prog = "\n".join([line.strip() for line in lines if line.strip()])

        memory = Memory()
        memory.loadProgram(filename)
        prog_in_memory = "\n".join([hex(int(instr, 2))[2:].zfill(8).upper()[:8] for instr in memory.list])
        self.assertEqual(prog, prog_in_memory)
Example #34
    def test_independant_instrs_dummy(self):
        prog = """I ADDI R1 R1 1
R ADD  R3 R3 R2
"""
        memory = Memory()
        memory.loadProgramDebugFromText(prog)
        processor = Processor(memory, 0)
        disablePrint()
        processor.start()
        enablePrint()
        cpi = processor.getCPI()
        # CPI should be 3 as there is no stall
        # For the second instruction.
        self.assertEqual(cpi, 3)
Example #36
    def get_jump_address(npc, instr):
        """Return jump address for instr given npc.

        Take 4 msb of old PC
        Mul offset_from_pc by 4
        Concatenate
        That's where we should jump

        Arguments:
        - `instr`: J-type instruction.
        """
        old_pc = npc - 4
        pc_msb = Memory.get_binary_string(old_pc)[:4]
        imm = Memory.get_binary_string(instr.offset_from_pc * 4, 28)
        jump_addr = int(pc_msb + imm, 2)
        return jump_addr
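
A concrete illustration of the concatenation, assuming Memory.get_binary_string zero-pads to 32 bits by default and to the given width otherwise (that signature is an assumption):

    # npc = 8 means the jump instruction itself sat at old_pc = 4.
    npc, offset_from_pc = 8, 3
    pc_msb = format(npc - 4, '032b')[:4]       # '0000', top 4 bits of old PC
    imm = format(offset_from_pc * 4, '028b')   # byte address 12 as 28 bits
    assert int(pc_msb + imm, 2) == 12          # the resulting jump target
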
Example #39
class DeviceManager:
    def __init__(self):
        self.memory = Memory()
        self.disk = Disk()

    def save_program(self, program):
        self.disk.add_program(program)

    def get_program(self, programs_name):
        return self.disk.get_program(programs_name)

    def load_program(self, program):
        return self.memory.load(program)
Example #40
class TestsMemoria(unittest.TestCase):

    def setUp(self):
        self.memoria = Memory()
        self.memoria.write(0, "primeraInstruccion")
        self.memoria.write(1, "segundaInstruccion")

    def test_siguientePosicion(self):
        self.assertEqual(2, self.memoria.next_position)

    def test_hayEspacioParaGuardar(self):
        self.assertTrue(self.memoria.free_memory_to_save(10))

    def test_readMemoria(self):
        self.assertEqual("primeraInstruccion", self.memoria.read(0))

    def test_deleteMemoria(self):
        self.memoria.delete("primeraInstruccion")
        self.assertEqual(511, self.memoria.free_cells)

    def test_size(self):
        self.assertEqual(512, self.memoria.size)
Example #41
	def set_value(self, value):
		Memory.store(self.ch, value)
Example #42
 def run(self, lapse, time):
     if not self.started:
         Memory.writeCallHistory(self.proc_type, self.date, self.number, self.execution_time)
     super(CallProcess, self).run(lapse, time)  
Example #43
 def setValue(self, value):
     Memory.store(self.var, value)
Example #44
    def __repr__ (self):
        return self.__str__ ()

    def __eq__(self, other):
        """Return True iff self and other have the same attributes.
        
        Arguments:
        - `other`:
        """
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        else:
            return False

    def __ne__(self, other):
        """Return False iff self and other have the same attributes.
        
        Arguments:
        - `other`:
        """
        return not self.__eq__(other)



if __name__ == "__main__" :
    memory = Memory ()
    memory.loadProgram ('./Input_hex_fibonacci.txt')
    for instr in memory:
        print Instruction (instr).interpret ()
Example #45
class Interpreter(object):
    def __init__(self):
        self.memory = Memory()
        # list of operations stored as list
        self.lines = {}
        # list of actual operation objects that has been executed before
        self.operations = {}
        # list of output lines
        self.output = []
        # instance of operation factory to create operations based on lines of operations
        self.op_maker = OperationFactory(self.memory, self.operations, self.lines)

    def add_line(self, number, line):  
        """
        Add operation lines to the Interpreter
        """
        self.lines[int(number)] = line

    def get_output(self):
        """
        Get the list of output
        """
        return self.output

    def get_variables(self):
        """
        Get the list of variables in the memory
        """
        return self.memory.get_variables()

    def write_error(self, msg):
        self.output = ["Error", msg]

    def clear_output(self):
        self.output = []

    def run(self):
        """
        Sort the lines of operation and run them line by line
        Go through each line, create related operation then execute the operations
        """
        line_number = sorted(self.lines.keys())
        sub_stack = []
        index = 0
        while index < len(line_number):
            current_no = line_number[index]
            current_line = self.lines[current_no]

            if current_line[0] == 'END':
                return 0
            
            elif current_line[0] == 'GOTO' or current_line[0] == 'GOSUB':
                if int(current_line[1]) == current_no:
                    self.write_error("Cannot Goto same line number")
                    return 0        
                
                if current_line[0] == 'GOSUB':
                    sub_stack.append(index+1)
                    print(index)

                if int(current_line[1]) in line_number:
                    index = line_number.index(int(current_line[1])) 
                    print(index)
                else:
                    self.write_error("Line Number does not exist")
                    return 0

            elif current_line[0] == 'RETURN':
                if len(sub_stack) < 1:
                    #TODO throw an exception 'RETURN WITHOUT GOSUB'
                   # self.write_error("RETURN WITHOUT GOSUB")
                    return 0
                else:
                    index = sub_stack.pop()
            
            else:           
                op = self.op_maker.create_operation(current_line)
                if isinstance(op, str):
                    self.write_error(op)
                    return 0
                self.operations[current_no] = op

                # Get the result of each operation
                result = op.operate(self.memory)
            
                # if an operation returns a value then add the result to output list
                if result is not None:
                    self.output.append(result)
           
                if current_line[0] == "IF" and result is True:
                    index = line_number.index(int(current_line[5]))
                else:
                    index += 1
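
A minimal usage sketch, exercising only the control flow visible in run() above (GOSUB/RETURN/END); the token format for other operations depends on OperationFactory and is not shown here:

    interp = Interpreter()
    interp.add_line(10, ['GOSUB', '30'])  # jump to line 30, remembering the index of line 20
    interp.add_line(20, ['END'])          # execution stops here after RETURN
    interp.add_line(30, ['RETURN'])       # pop the saved index and resume at line 20
    interp.run()
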
Example #46
 def execute(self, prog):
     print(Memory.fetch(self.var.getCh()))
     
Example #47
from Memory import Memory
from globalHelp import *
from SystemQueue import SystemQueue
from CPU import CPU
from Printer import Printer
from Logger import Logger
#---------------------------- Global Variables ----------------------------------------------


CPU = CPU()

Memory.initialise()
SystemBuffer.initialise()
Printer.initialize()
Logger.initialize()

def main():
    channel.channel1.ChannelBusy = True
    channel.channel3.ChannelBusy = True
    channel.channel2.ChannelBusy = True

    while(not channel.ChannelIdle()):
        simulate()
        CPU.Process()
        CPU.IOInterrupt()


def simulate():
    busyList = []
    for ch in channel.channelList:
        if ch.ChannelBusy:
Example #48
from Temperature import Temperature
from Distance import Distance
from Memory import Memory
from Weight import Weight

if __name__ == '__main__':
    while (True):
        choice = input ('Available conversions ... \n' +
                    '1. Temperature\n' +
                    '2. Distance\n' +
                    '3. Memory\n' +
                    '4. Weight\n' +
                    '5. Exit\n' +
                    'Please enter your choice ... ' )
        if choice == '1' :
            t = Temperature()
            t.convert()
        elif choice == '2' :
            d = Distance()
            d.convert()
        elif choice == '3':
            m = Memory()
            m.convert()
        elif choice == '4' :
            w = Weight()
            w.convert()
        else:
            print('Exiting now ... Bye!')
            break
    
Example #49
    def run(self):
        new_queue = Queue.Queue()
        
        speed = 1
        
        curr_time = 0
        run = True
        
        started = False
        
        while run:
            # Receive commands coming from the console
            while self.conn.poll():
                cc, args = self.conn.recv()
                new_process = None
                
                if cc == self.CC_EXIT:
                    run = False
                    break
                elif cc == self.CC_MAKE_CALL:
                    new_process = CallProcess(self.pid_count + 1, 1, 'make_call', *args)
                elif cc == self.CC_RECEIVE_CALL:
                    new_process = CallProcess(self.pid_count + 1, 2, 'receive_call', *args)
                elif cc == self.CC_SEND_MSG:
                    new_process = MessageProcess(self.pid_count + 1, 3, 'send_msg', *args)
                elif cc == self.CC_RECEIVE_MSG:
                    new_process = MessageProcess(self.pid_count + 1, 4, 'receive_msg', *args)
                elif cc == self.CC_ADD_CONTACT:
                    new_process = AddContactProcess(self.pid_count + 1, 5, 'add_contact', *args)
                elif cc == self.CC_RANDOM:
                    new_process = RandomProcess(self.pid_count + 1, 6, 'random_process', *args)
                elif cc == self.CC_SEND_LOCATION:
                    new_process = SendLocationProcess(self.pid_count + 1, 7, 'send_location', *args)
                elif cc == self.CC_WATCH_LOCATION:
                    new_process = WatchLocationProcess(self.pid_count + 1, 8, 'watch_location', *args)
                elif cc == self.CC_PLAY_GAME:
                    new_process = PlayGameProcess(self.pid_count + 1, 9, 'play_game', *args)
                elif cc == self.CC_PLAY_MUSIC:
                    new_process = PlayMusicProcess(self.pid_count + 1, 10, 'play_music', *args)
                elif cc == self.CC_START_PROCESS:
                    new_process = Process(self.pid_count + 1, 6, *args)
                elif cc == self.CC_TOP:
                    self.scheduler.top(self.current_process)
                elif cc == self.CC_CALL_HISTORY:
                    Memory.readCallHistory()
                elif cc == self.CC_MSG_HISTORY:
                    Memory.readMsgHistory()
                elif cc == self.CC_SIMULATE:
                    started = True
                    speed = float(args[0]) if len(args) == 1 else 1
                    print 'Empezando simulacion'

                # Newly created processes go into the New queue
                if new_process is not None:
                    self.pid_count += 1
                    new_queue.put((new_process.start_time, new_process))

            if started:
                # Move due processes from the New queue to the Ready queue
                i = 0
                n = new_queue.qsize()
                while i < n:
                    process = new_queue.get()[1]
                    if (process.start_time*1000 == curr_time):
                        print 'Agendando Proceso %s numero %s en el tiempo %s' % (process.name,process.pid, curr_time)
                        self.scheduler.add(process)
                    else:
                        new_queue.put((process.start_time, process))
                    i += 1

                if self.current_process is not None:
                    if self.current_process.pending_time <= 0:
                        # Release any peripherals the process had blocked
                        self.current_process.free_peripherals()
                        # Remove the process from the scheduler
                        self.scheduler.dispose(self.current_process)
                        self.current_process = None

                    # The scheduler returns the process that should run now.
                    # If it is time to switch and a process is already running, swap it out.
                    elif self.count == self.QUANTUM:
                        self.count = 0
                        # The preempted process goes to the back of the queue
                        self.scheduler.dispose(self.current_process)
                        self.scheduler.add(self.current_process)
                        self.current_process = self.scheduler.getNext()

                # If the CPU is free, check whether another process is waiting
                if self.current_process is None:
                    self.current_process = self.scheduler.getNext()

                # Otherwise just advance the time counter
                if self.count == self.QUANTUM:
                    self.count = 0

                if self.current_process is not None:
                    # run the process for its granted time slice
                    self.current_process.run(self.TIME_STEP, curr_time)

                self.count += 1
                curr_time += self.TIME_STEP
                
            time.sleep(self.TIME_STEP / (1000 * speed))
Example #50
 def evaluate(self):
     return Memory.fetch(self.char)
Example #51
 def setUp(self):
     self.memoria = Memory()
     self.memoria.write(0, "primeraInstruccion")
     self.memoria.write(1, "segundaInstruccion")
Example #53
	def evaluate(self):
		return Memory.fetch(self.ch)
Example #54
 def execute(self, prog):
     Memory.store(self.var.getCh(), self.expr.evaluate())
Example #55
 def run(self, lapse, time):
     if not self.started:
         Memory.writeMsgHistory(self.proc_type, self.date, self.number, self.text)
     super(MessageProcess, self).run(lapse, time)  
Example #56
# -*- coding: utf-8 -*-
from Memory import Memory

if __name__ == "__main__":
    m = Memory()
    m.new_object("Я")
Example #57
 def execute(self):
     Memory.store(self.var, self.expr.evaluate())
Example #58
class Q:
    def __init__(self):
        self.net = None
        self.env = Environment(False, 4)
        self.mem = Memory(32, 1000000)
        self.epsilon = 0.5
        self.gamma = 0.7
        self.number_of_actions = 4
        try:
            self.load_network()
        except IOError:
            print 'No network found'
            self.create_model()

    def create_model(self):
        print 'Creating model...'
        model = Sequential()
        model.add(
            Convolution2D(32, 8, 8, subsample=(4, 4), activation='relu', input_shape=(4, 84, 84)))
        model.add(Convolution2D(64, 4, 4, activation='relu', subsample=(2, 2)))
        model.add(Convolution2D(64, 3, 3, activation='relu', subsample=(1, 1)))
        model.add(Flatten())
        model.add(Dense(512, activation='relu'))
        model.add(Dense(self.number_of_actions, activation='linear'))
        model.compile(loss='mse', optimizer='rmsprop')
        self.net = model
        print 'Done!'

    def save_network(self):
        json_string = self.net.to_json()
        open('deep_q_network.json', 'w').write(json_string)
        self.net.save_weights('network_weights.h5', overwrite=True)

    def load_network(self):
        print 'Loading network...'
        model = model_from_json(open('deep_q_network.json').read())
        model.load_weights('network_weights.h5')
        model.compile(loss='mse', optimizer='rmsprop')
        print 'Network loaded!'
        self.net = model

    def train(self, epochs):
        for i in xrange(epochs):
            state = self.env.get_state()
            while not self.env.isTerminal():
                qval = self.net.predict(state.reshape(1, 4, 84, 84), batch_size=1)
                if random.random() < self.epsilon:  # choose random action
                    action = np.random.randint(0, self.number_of_actions)
                else:  # choose best action from Q(s,a) values
                    action = np.argmax(qval)
                # Take action, observe new state S'
                reward = self.env.act(action)
                new_state = self.env.get_state()
                # Experience replay storage
                is_terminal = self.env.isTerminal()

                self.mem.store(state, action, reward, new_state, is_terminal)

                print 'Game : {}'.format(i)
                if self.mem.isFull():
                    minibatch = self.mem.sample()
                    self.train_on_minibatch(minibatch)
                state = new_state

            if self.epsilon > 0.1:  # decrement epsilon over time
                self.epsilon -= (1.0 / 100000)  # float division; plain 1/100000 is integer 0 in Python 2
            self.env.restart()
            if i % 10 == 0:
                self.save_network()

    def train_on_minibatch(self, minibatch):
        x_train, y_train = [], []
        for sample in minibatch:
            # Get max_Q(S',a)
            old_state, action, reward, new_state, terminal = sample
            old_qval = self.net.predict(old_state.reshape(1, 4, 84, 84), batch_size=1)
            newQ = self.net.predict(new_state.reshape(1, 4, 84, 84), batch_size=1)
            maxQ = np.max(newQ)
            y = np.zeros((1, self.number_of_actions))
            y[:] = old_qval[:]
            if not terminal:  # non-terminal state
                update = (reward + (self.gamma * maxQ))
            else:  # terminal state
                update = reward
            y[0][action] = update
            x_train.append(old_state.reshape(4, 84, 84))
            y_train.append(y.reshape(self.number_of_actions, ))

        x_train = np.array(x_train)
        y_train = np.array(y_train)
        self.net.fit(x_train, y_train, batch_size=self.mem.batch_size, nb_epoch=1)

    def play(self):
        environment = Environment(True, 4)
        while not environment.isTerminal():
            state = environment.get_state()
            qval = self.net.predict(state.reshape(1, 4, 84, 84), batch_size=1)
            action = (np.argmax(qval))
            reward = environment.act(action)
Example #59
            self.execute_one_cycle()

            if not self.are_instructions_in_flight() or (
                    num_cycles is not None and self.cycle_count == num_cycles):
                break

        print '\nAt the end'
        print '=' * 12
        self.print_buffers ()
        print self.register_file

    def start(self, cycle_data_file_name = default_data_file_name):
        """Start execution of instructions from the start_address.
        """
        self.instruction_address = self.start_address
        self.execute_cycles()

    def getCPI (self):
        return (1.0 * self.cycle_count) / self.fetch_stage.fetch_input_buffer.instr_count

if __name__ == "__main__":
    memory = Memory ()
    filename = './fibo.txt'
    if len (sys.argv) > 1:
        filename = sys.argv [1]
    memory.loadProgramDebug (filename)
    processor = Processor (memory, 0)
    processor.start ()
    print 'CPI: ', processor.getCPI ()
Example #60
File: R86.py Project: RahnX/R86
class R86:
	def __init__(self):
		self.segment_register = SegmentRegister()
		self.integer_register = IntegerRegister()
		self.special_register = SpecialRegister()
		self.memory = Memory()

		self.code_segment = []
		self.label_table  = {}

		self.unary_operation_dict = {}
		self.unary_operation_dict["incl"] = lambda x: x + 1
		self.unary_operation_dict["decl"] = lambda x: x - 1
		self.unary_operation_dict["negl"] = lambda x: -x
		self.unary_operation_dict["notl"] = lambda x: ~x
		self.unary_operation_dict["shrl"] = lambda x: x >> 1
		self.unary_operation_dict["shll"] = lambda x: x >> 1

		self.binary_operation_dict = {}
		self.binary_operation_dict["addl"]  = lambda x, y: x + y
		self.binary_operation_dict["subl"]  = lambda x, y: x - y
		self.binary_operation_dict["imull"] = lambda x, y: x * y
		self.binary_operation_dict["orl"]  = lambda x, y: x | y
		self.binary_operation_dict["andl"] = lambda x, y: x & y
		self.binary_operation_dict["xorl"] = lambda x, y: x ^ y

		self.binary_operation_dict["shrl"] = lambda x, y: x >> y
		self.binary_operation_dict["shll"] = lambda x, y: x << y

		self.register_table = self.segment_register.register_table.copy()
		self.register_table.update(self.integer_register.register_table)
		self.register_table.update(self.special_register.register_table)

	def unary_operate(self, ins, dest):
		result = self.unary_operation_dict[ins](self.get(dest))
		self.set(result, dest)
		self.set_condition_code(result)

	def binary_operate(self, ins, source, dest):
		result = self.binary_operation_dict[ins](self.get(dest), source)
		self.set(result, dest)
		self.set_condition_code(result)

	def compare(self, ins, second_source, first_source):
		self.set_condition_code(first_source - second_source)

	def test(self, ins, second_source, first_source):
		self.set_condition_code(first_source & second_source)

	def jump_to_table(self, label, offset):
		self.set_reg(self.label_table[label]+offset, "eip")
		target_label_line = self.code_segment[self.get_reg("eip")+1]
		target_label = target_label_line[len(".long"):].strip()
		label_pos = self.label_table[target_label]
		self.set_reg(label_pos, "eip")

		#print("label_pos : {}".format(label_pos))

	def conditional_jump(self, ins, label):
		should_jump = {
			"jmp": True,
			"je" : self.get_reg("ZF"),
			"jne": not self.get_reg("ZF"),
			"jl" : self.get_reg("SF"),
			"jle": self.get_reg("SF") or self.get_reg("ZF"),
			"jg" : not (self.get_reg("SF") or self.get_reg("ZF")),
			"jge": not self.get_reg("SF"),
			"js" : self.get_reg("SF"),
			"jns": not self.get_reg("SF")
		}[ins]

		if should_jump:
			self.set_reg(self.label_table[label], "eip")

	def set_condition_code(self, result):
		if result == 0:
			self.set_reg(1, "ZF")
			self.set_reg(0, "SF")
		elif result < 0:
			self.set_reg(0, "ZF")
			self.set_reg(1, "SF")
		else: #result > 0
			self.set_reg(0, "ZF")
			self.set_reg(0, "SF")

	def set(self, source_value, dest):
		if dest in self.integer_register.name_list:
			self.set_reg(source_value, dest)
		else:
			self.set_memory(source_value, dest)

	def get(self, dest):
		if dest in self.integer_register.name_list:
			return self.get_reg(dest)
		else:
			return self.get_memory(dest)

	def set_reg(self, _value, reg):
		try:
			self.register_table[reg].set_value(_value)
		except LookupError:
			print("***\nRegister not found: [" + reg + "]\n***")
			exit()

	def get_reg(self, reg):
		try:
			return self.register_table[reg].get_value()
		except LookupError:
			print("***\nRegister not found: [" + reg + "]\n***")
			exit()

	def init_memory(self, _min, _max):
		self.memory.init(_min, _max)

	def set_memory(self, _value, _address):
		self.memory.set(_value, _address)

	def get_memory(self, _address):
		return self.memory.get(_address)

	def print_register(self):
		self.integer_register.print_self()
		self.special_register.print_self()
		self.segment_register.print_self()

	def print_memory(self):
		self.memory.print_self()

	def print_self(self):
		self.print_register()
		print("LABEL TABLE")
		print(self.label_table)
		print()
		self.print_memory()
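
A small smoke test of the operation tables above, assuming the register and Memory classes this project imports are available (illustrative only):

	# Exercise the ALU lambdas directly; no registers or memory involved.
	r86 = R86()
	assert r86.binary_operation_dict["addl"](2, 3) == 5
	assert r86.binary_operation_dict["subl"](7, 3) == 4
	assert r86.unary_operation_dict["shll"](4) == 8  # logical shift left by one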