def compareFixed():
    """Evaluate pretrained HNM, NTM, and LSTM models on a type-1 sequence task.

    Loads each model's saved parameters from disk, runs all three over the
    same 100-sample test set, and prints the mean absolute difference between
    the argmax of each model's output and the argmax of the target, in the
    order: HNM, NTM, LSTM.
    """
    tasks = Tasks()
    x_test, y_test = tasks.sequence_type_1(100)

    # Restore the three pretrained models from their checkpoint files.
    add_params = torch.load('program_memory/add.pt')
    mul_params = torch.load('program_memory/mul.pt')
    hnm = HNM(10, 20, add_params, mul_params)
    hnm.load_state_dict(torch.load("learned_params/hnm_arch_2.pt"))
    ntm = NTM(10, 20)
    ntm.load_state_dict(torch.load("learned_params/ntm.pt"))
    lstm = LSTM(14, 256, 325, 1)
    lstm.load_state_dict(torch.load("learned_params/lstm.pt"))

    # Accumulate |argmax(target) - argmax(prediction)| per model.
    hnm_diff = ntm_diff = lstm_diff = 0
    for idx in range(len(x_test)):
        sample = x_test[idx:idx + 1]
        answer = np.argmax(y_test[idx:idx + 1].detach().numpy())
        hnm_diff += abs(answer - np.argmax(hnm.recurrent_forward(sample).detach().numpy()))
        ntm_diff += abs(answer - np.argmax(ntm.recurrent_forward(sample).detach().numpy()))
        lstm_diff += abs(answer - np.argmax(lstm.recurrent_forward(sample).detach().numpy()))

    total = len(y_test)
    print(hnm_diff / total, ntm_diff / total, lstm_diff / total)
def trainNTM():
    """Train an NTM(10, 20) on 2000 type-1 sequence samples.

    Runs 25 epochs at learning rate 6e-4; trained weights are handled by
    the model's own `train` implementation.
    """
    task_source = Tasks()
    x_train, y_train = task_source.sequence_type_1(2000)
    model = NTM(10, 20)
    model.train(x_train, y_train, 1, maxEpoch=25, learning_rate=0.0006)
def trainHarvard():
    """Train an HNM on 2000 type-1 sequence samples.

    The HNM is constructed from pretrained add/multiply program parameters
    loaded from `program_memory/`; training hyperparameters are the model's
    own defaults.
    """
    task_source = Tasks()
    x_train, y_train = task_source.sequence_type_1(2000)
    add_params = torch.load('program_memory/add.pt')
    mul_params = torch.load('program_memory/mul.pt')
    model = HNM(10, 20, add_params, mul_params)
    model.train(x_train, y_train, 1)
def trainLSTM():
    """Train an LSTM(14, 256, 325, 1) on 2000 type-1 sequence samples.

    Runs 25 epochs at learning rate 3e-4 via the model's own `train` method.
    """
    task_source = Tasks()
    x_train, y_train = task_source.sequence_type_1(2000)
    model = LSTM(14, 256, 325, 1)
    model.train(x_train, y_train, maxEpoch=25, learning_rate=0.0003)