def _STI(instruction):
    """Store indirect (STI): mem[mem[PC + PCoffset9]] = reg[SR].

    Bits [11:9] select the source register; bits [8:0] hold a signed
    9-bit PC-relative offset that locates a pointer word in memory.
    """
    src = (instruction >> 9) & 0x7
    offset = sign_extend(instruction & 0x1FF, 9)
    # The word at PC + offset is itself an address: read it, then store there.
    pointer = reg_read(Registers.PC) + offset
    mem_write(mem_read(pointer), reg_read(src))
def _IN():
    """Read one line of user input and store its characters into memory.

    Characters are written sequentially starting at the address held in R0,
    stopping early if a NUL character appears in the input.

    NOTE(review): no NUL terminator is written after the last character —
    confirm whether downstream consumers (e.g. a PUTS trap) expect one.
    """
    # Destination address comes from R0.
    addr = reg_read(Registers.R0)
    for ch in input():
        # Stop at an embedded NUL (can only occur with piped/pasted input).
        if ch == '\0':
            break
        mem_write(addr, ord(ch))
        addr += 1
def _STR(instruction):
    """Store register (STR): mem[reg[BaseR] + offset6] = reg[SR].

    Bits [11:9] select the source register, bits [8:6] the base register,
    and bits [5:0] a signed 6-bit offset added to the base.
    """
    src = (instruction >> 9) & 0x7
    base = (instruction >> 6) & 0x7
    offset = sign_extend(instruction & 0x3F, 6)
    # Effective address is base-register contents plus the sign-extended offset.
    target = reg_read(base) + offset
    mem_write(target, reg_read(src))
def _ST(instruction):
    """Store (ST): mem[PC + PCoffset9] = reg[SR].

    Bits [11:9] select the source register; bits [8:0] hold a signed
    9-bit PC-relative offset giving the destination address.
    """
    src = (instruction >> 9) & 0x7
    offset = sign_extend(instruction & 0x1FF, 9)
    mem_write(reg_read(Registers.PC) + offset, reg_read(src))
def process_step(self, external_input, memory, read_vectors, previous_weights):
    """Run one NTM timestep: read from memory, produce output, write to memory.

    Builds the Theano symbolic graph for a single step of the Neural Turing
    Machine: concatenates the previous read vectors with the external input,
    feeds them through the controller, then performs the addressing pipeline
    (content focus -> interpolation -> shift -> sharpen -> normalize) once per
    read head and once for the single write head.

    @param external_input: a batchsize x input_size matrix that represents
        the data for this timestep
    @param memory: a batchsize x N x M memory 3-tensor from the previous
        timestep
    @param read_vectors: a self.read_heads x batchsize x N 3-tensor holding
        all read vectors produced by the read heads in the previous timestep
    @param previous_weights: a (1 + len(self.read_heads)) x batchsize x M
        3-tensor of the weightings produced by all heads in the previous
        timestep; index [-1] is the write head's weighting
    @return: (new_memory, read_vectors, previous_weights, ntm_output)

    NOTE(review): shape comments below are taken from the original author's
    annotations — confirm against mem_focus/focus_shift/mem_read/mem_write.
    """
    # concatenated_read_vector -> batchsize x (slot_size * num_read_heads)
    concatenated_read_vector = T.concatenate(
        [read_vectors[head] for head in range(len(self.read_heads))], axis=1)
    # concatenated_input -> batchsize x (slot_size * num_read_heads + input_size)
    concatenated_input = T.concatenate(
        [concatenated_read_vector, external_input], axis=1)
    # controller_output -> batchsize x self.controller_size
    controller_output = T.nnet.relu(
        self.controller.forward(concatenated_input))
    # The NTM's actual output for this timestep.
    # ntm_output -> batchsize x self.output_size
    ntm_output = T.dot(controller_output, self.output_weight)
    # --- Read phase: run the addressing pipeline for every read head. ---
    for head in range(len(self.read_heads)):
        key, shift, sharpen, strengthen, interpolation = self.read_heads[
            head].produce(controller_output)
        # Preprocess for mem_focus:
        # key -> batchsize x 1 x N; strengthen -> batchsize x 1 (already good)
        key = key.dimshuffle([0, 'x', 1])
        # Focus by content + strengthen.
        # preliminary_weight -> batchsize x M
        preliminary_weight = mem_focus(memory, key, strengthen)
        # Focus by location: blend with this head's previous weighting.
        interpolated_weight = interpolation * preliminary_weight + (
            1 - interpolation) * previous_weights[head]
        # Shift: both arguments are batchsize x M (weighting, then shift).
        shifted_weight = focus_shift(interpolated_weight, shift,
                                     self.memory_slots)
        # Sharpen (sharpen's second axis is broadcastable).
        # sharpened_weight -> batchsize x M
        sharpened_weight = shifted_weight**sharpen
        # Normalize; T.sum(...) -> batchsize x 1.
        # SMALL_CONSTANT guards against division by zero.
        final_weight = sharpened_weight / (T.sum(
            sharpened_weight, axis=1, keepdims=True) + SMALL_CONSTANT)
        # Read: read_vec -> batchsize x N x 1, flattened to batchsize x N.
        read_vec = mem_read(memory, final_weight)
        # Rebind (Theano tensors are immutable): record this head's results.
        read_vectors = T.set_subtensor(read_vectors[head],
                                       T.flatten(read_vec, outdim=2))
        previous_weights = T.set_subtensor(previous_weights[head],
                                           final_weight)
    # --- Write phase: same pipeline for the single write head. ---
    key, add, erase, shift, sharpen, strengthen, interpolation = self.write_head.produce(
        controller_output)
    # Preprocess for mem_focus:
    # key -> batchsize x 1 x N; strengthen -> batchsize x 1 (already good)
    key = key.dimshuffle([0, 'x', 1])
    # Focus by content + strengthen.
    # preliminary_weight -> batchsize x M
    preliminary_weight = mem_focus(memory, key, strengthen)
    # Focus by location; slot [-1] holds the write head's previous weighting.
    interpolated_weight = interpolation * preliminary_weight + (
        1 - interpolation) * previous_weights[-1]
    # Shift.
    shifted_weight = focus_shift(interpolated_weight, shift,
                                 self.memory_slots)
    # Sharpen.
    sharpened_weight = shifted_weight**sharpen
    # Normalize; T.sum(...) -> batchsize x 1.
    final_weight = sharpened_weight / (
        T.sum(sharpened_weight, axis=1, keepdims=True) + SMALL_CONSTANT)
    previous_weights = T.set_subtensor(previous_weights[-1], final_weight)
    # Preprocess for mem_write:
    # weighting -> batchsize x 1 x M
    # erase_vector -> batchsize x N x 1
    # add_vector -> batchsize x N x 1
    final_weight = final_weight.dimshuffle([0, 'x', 1])
    erase = erase.dimshuffle([0, 1, 'x'])
    add = add.dimshuffle([0, 1, 'x'])
    new_memory = mem_write(memory, final_weight, erase, add)
    return new_memory, read_vectors, previous_weights, ntm_output