import numpy
import theano

# Assumed local imports: in the theano-hf distribution these names live in
# hf.py (hf_optimizer, sgd_optimizer, SequenceDataset) alongside the
# simple_NN / simple_RNN model builders used below.


def example_NN(hf=True):
    # Feed-forward XOR benchmark: 2 inputs -> 50 -> 40 -> 30 -> 1 output.
    p, inputs, s, costs = simple_NN((2, 50, 40, 30, 1))

    xor_dataset = [[], []]
    for i in range(50000):
        x = numpy.random.randint(0, 2, (50, 2))
        t = (x[:, 0:1] ^ x[:, 1:2]).astype(theano.config.floatX)
        x = x.astype(theano.config.floatX)
        xor_dataset[0].append(x)
        xor_dataset[1].append(t)

    # Use the first 3/4 for training, the rest for validation
    # (// keeps the index an integer under both Python 2 and 3).
    training_examples = len(xor_dataset[0]) * 3 // 4
    train = [xor_dataset[0][:training_examples],
             xor_dataset[1][:training_examples]]
    valid = [xor_dataset[0][training_examples:],
             xor_dataset[1][training_examples:]]

    gradient_dataset = SequenceDataset(train, batch_size=None,
                                       number_batches=10000)
    cg_dataset = SequenceDataset(train, batch_size=None,
                                 number_batches=5000)
    valid_dataset = SequenceDataset(valid, batch_size=None,
                                    number_batches=5000)

    if hf:
        hf_optimizer(p, inputs, s, costs).train(
            gradient_dataset, cg_dataset, initial_lambda=1.0,
            preconditioner=True, validation=valid_dataset)
    else:
        sgd_optimizer(p, inputs, costs, gradient_dataset, lr=1e-3)
def example_RNN(hf=True):
    # Recurrent benchmark: memorize the first unit for 100 time-steps
    # under binary noise.
    p, inputs, s, costs, h, ha = simple_RNN(100)

    memorization_dataset = [[]]  # a single stream of input sequences
    for i in range(100000):
        memorization_dataset[0].append(numpy.random.randint(
            2, size=(100, 1)).astype(theano.config.floatX))

    train = [memorization_dataset[0][:-1000]]
    valid = [memorization_dataset[0][-1000:]]

    gradient_dataset = SequenceDataset(train, batch_size=None,
                                       number_batches=5000)
    cg_dataset = SequenceDataset(train, batch_size=None,
                                 number_batches=1000)
    valid_dataset = SequenceDataset(valid, batch_size=None,
                                    number_batches=1000)

    if hf:
        # 0.5 * (h + 1) rescales the tanh hidden state to (0, 1) for the
        # structural damping cost; ha is the matching pre-activation.
        hf_optimizer(p, inputs, s, costs, 0.5 * (h + 1), ha).train(
            gradient_dataset, cg_dataset, initial_lambda=0.5, mu=1.0,
            preconditioner=False, validation=valid_dataset)
    else:
        sgd_optimizer(p, inputs, costs, gradient_dataset, lr=5e-5)
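# A minimal entry point for running the two examples above as a script
# (a sketch; the hf flag switches between Hessian-free training and SGD):
if __name__ == '__main__':
    example_NN(hf=True)   # feed-forward XOR task
    example_RNN(hf=True)  # 100-step memorization task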
import copy

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold

# Assumed local imports for the snippets below: MetaRNN (the Theano RNN
# wrapper), hf_optimizer and SequenceDataset (hf.py), plus the
# process_data / plot_predictions / evaluation helpers referenced by the
# later fragments.


def test_real(n_updates=100):
    """ Test RNN with real-valued outputs. """
    n_hidden = 10
    n_in = 5
    n_out = 3
    n_steps = 10
    n_seq = 1000

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))
    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2
    targets += 0.01 * np.random.standard_normal(targets.shape)

    # SequenceDataset wants a list of sequences; this allows them to be
    # different lengths, but here they're not.
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=100)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=20)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh')

    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y)], h=model.rnn.h)

    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')

    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])

    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')
def test_real(n_updates=100):
    """ Test RNN with real-valued outputs on data loaded via process_data. """
    train, valid, test = process_data.load_data()
    tseq, ttargets = train
    vseq, vtargets = valid
    test_seq, test_targets = test
    length = len(tseq)

    n_hidden = 6
    n_in = 48
    n_out = 12
    n_steps = 1
    n_seq = length

    # Wrap each example as a length-1 sequence for SequenceDataset.
    seq = [[i] for i in tseq]
    targets = [[i] for i in ttargets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=100)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=20)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=500, activation='relu')

    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y)], h=model.rnn.h)

    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    test_seq = [[i] for i in test_seq]
    test_targets = [[i] for i in test_targets]
    plt.close("all")
    for idx in xrange(len(test_seq)):
        guess = model.predict(test_seq[idx])
        plot_predictions(test_seq[idx][0], test_targets[idx][0], guess[0])
def test_binary(multiple_out=False, n_updates=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out), dtype='int32')

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int32](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is greater than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int32](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    # SequenceDataset wants a list of sequences; this allows them to be
    # different lengths, but here they're not.
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=500)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=100)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh', output_type='binary')

    # optimizes negative log likelihood, but also reports zero-one error
    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y),
                              model.rnn.errors(model.y)], h=model.rnn.h)

    # using the settings of initial_lambda and mu given in Nicolas' RNN
    # example seems to do a little worse than the defaults
    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    seqs = xrange(10)
    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')

        ax2 = plt.subplot(212)
        true_targets = plt.step(xrange(n_steps), targets[seq_num],
                                marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(xrange(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_updates=250):
    """ Test RNN with softmax outputs. """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps), dtype='int32')

    thresh = 0.5
    # class 1: lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 2: lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 0: lag 2 (dim 0) - thresh <= lag 1 (dim 3) <= lag 2 (dim 0) + thresh
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
    # targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    # SequenceDataset wants a list of sequences; this allows them to be
    # different lengths, but here they're not.
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=500)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=100)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh', output_type='softmax',
                    use_symbolic_softmax=True)

    # optimizes negative log likelihood, but also reports zero-one error
    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y),
                              model.rnn.errors(model.y)], h=model.rnn.h)

    # using the settings of initial_lambda and mu given in Nicolas' RNN
    # example seems to do a little worse than the defaults
    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    seqs = xrange(10)
    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')

        ax2 = plt.subplot(212)
        # blue line will represent true classes
        true_targets = plt.step(xrange(n_steps), targets[seq_num],
                                marker='o')

        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
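# Aside: the chained assignment `targets[:, 2:][mask] = 1` above works
# because basic slicing returns a *view*, so the boolean-mask write lands
# in the parent array. A minimal sketch (toy shapes, illustrative only):
import numpy as np

t = np.zeros((2, 4), dtype='int32')
mask = np.array([[True, False], [False, True]])
t[:, 2:][mask] = 1  # writes through the view into t itself
assert t.sum() == 2 and t[0, 2] == 1 and t[1, 3] == 1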
# Fragment: softmax training setup. Assumes x_seq_list, y_seq_list, n_in,
# n_hidden, n_out, n_epochs and n_updates are defined earlier in the
# original script; the class-labelling block it started with duplicated
# test_softmax above verbatim and is omitted here.
print("Convert to SequenceDataset ...")
gradient_dataset = SequenceDataset([x_seq_list, y_seq_list],
                                   batch_size=None, number_batches=100)
cg_dataset = SequenceDataset([x_seq_list, y_seq_list], batch_size=None,
                             number_batches=50)

model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                n_epochs=n_epochs, activation='tanh',
                output_type='softmax', use_symbolic_softmax=True,
                L2_reg=0.0001)

# optimizes negative log likelihood, but also reports zero-one error
# (note: n_in is not an argument of the stock hf_optimizer; this fragment
# presumably targets a modified version)
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                   s=model.rnn.y_pred, n_in=n_in,
                   costs=[model.rnn.loss(model.y),
                          model.rnn.errors(model.y)], h=model.rnn.h)

print("Training ...")
opt.train(gradient_dataset, cg_dataset, initial_lambda=1.0,
          num_updates=n_updates, save_progress='save_param')
# Fragment: assumes seq, target, test_seq, test_target arrays and the
# counters a1, a2 are defined earlier, with floatX = theano.config.floatX.
a1 = a1 + 4
a2 = a2 + 4

seqs = seq.astype(floatX)
targets = target.astype(floatX)
test_seqs = test_seq.astype(floatX)
test_targets = test_target.astype(floatX)

[n_seq, n_steps, n_in] = seq.shape
n_hidden = 2 * n_in
n_out = 24

seqs = [i for i in seqs]
targets = [i for i in targets]  # iterate the cast array, not the original

gradient_dataset = SequenceDataset([seqs, targets], batch_size=None,
                                   number_batches=80)
cg_dataset = SequenceDataset([seqs, targets], batch_size=None,
                             number_batches=20)

# n_hidden is passed as the hidden-layer size (the original fragment's
# n_mul keyword looks like a typo for n_hidden).
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                activation='tanh')

opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                   s=model.rnn.y_pred,
                   costs=[model.rnn.loss(model.y)], h=model.rnn.h)
def RNN_Evaluation(sample, label, n_hidden=10, activation_func='tanh',
                   n_updates=20, k_fold=5):
    X = sample
    y = label
    kf = KFold(n_splits=k_fold, shuffle=True)
    split_num = kf.get_n_splits(X)
    k = 1
    G1, G2, S, Total = 0, 0, 0, 0
    (AUC, p, r, f1) = (0, 0, 0, 0)

    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        n_in = 20
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step
        np.random.seed(0)

        train_sample = [i for i in X_train]
        train_label = [i for i in y_train]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation=activation_func, output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                           s=model.rnn.y_pred,
                           costs=[model.rnn.loss(model.y),
                                  model.rnn.errors(model.y)],
                           h=model.rnn.h)
        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        # One-hot encode the test labels (classes 0, 1, 2).
        y_test_vector = np.zeros((X_test.shape[0], 3), dtype='int64')
        for count in range(0, X_test.shape[0]):
            if y_test[count][0] == 0:
                y_test_vector[count][0] = 1
            elif y_test[count][0] == 1:
                y_test_vector[count][1] = 1
            else:
                y_test_vector[count][2] = 1

        (AUC_k, p_k, r_k, f1_k) = evaluation.evaluate(model, X_test,
                                                      y_test_vector, 0.8)
        print("%s / %s Iteration: AUC: %s, Prec: %s, Rec: %s, F1: %s"
              % (k, k_fold, AUC_k, p_k, r_k, f1_k))
        AUC = AUC + AUC_k
        p = p + p_k
        r = r + r_k
        f1 = f1 + f1_k
        # Running average over the folds seen so far.
        print("Average: AUC: %s, Prec: %s, Rec: %s, F1: %s"
              % (AUC / k, p / k, r / k, f1 / k))
        k += 1

    AUC = AUC / k_fold
    p = p / k_fold
    r = r / k_fold
    f1 = f1 / k_fold
    return AUC, p, r, f1
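# Aside: the one-hot loop in RNN_Evaluation can be written as a single
# vectorized indexing step (a sketch; assumes labels in {0, 1, 2} stored
# in the first column of y_test, as above):
import numpy as np

y_test = np.array([[0], [2], [1]])
y_test_vector = np.eye(3, dtype='int64')[y_test[:, 0]]
# rows are [1, 0, 0], [0, 0, 1], [0, 1, 0]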
def RNN_k_fold_cross_validation(sample, label, n_updates=20):
    X = sample
    y = label
    kf = KFold(n_splits=5, shuffle=True)
    split_num = kf.get_n_splits(X)
    k = 1
    G1, G2, S, Total = 0, 0, 0, 0

    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step
        np.random.seed(0)

        train_sample = [i for i in X_train]
        train_label = [i for i in y_train]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation='tanh', output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                           s=model.rnn.y_pred,
                           costs=[model.rnn.loss(model.y),
                                  model.rnn.errors(model.y)],
                           h=model.rnn.h)
        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        # Per-class and overall accuracy on this fold.
        right, first, second, third = 0, 0, 0, 0
        first_sum, second_sum, third_sum = 0, 0, 0
        for count in range(0, X_test.shape[0]):
            guess = model.predict_proba(X_test[count])
            tmp_list = np.ndarray.tolist(guess.T)
            if y_test[count][0] == 0:
                first_sum += 1
                if tmp_list.index(max(tmp_list)) == y_test[count][0]:
                    print True
                    first += 1
                    right += 1
                else:
                    print k, False
            elif y_test[count][0] == 1:
                second_sum += 1
                if tmp_list.index(max(tmp_list)) == y_test[count][0]:
                    print True
                    second += 1
                    right += 1
                else:
                    print False
            else:
                third_sum += 1
                if tmp_list.index(max(tmp_list)) == y_test[count][0]:
                    print True
                    third += 1
                    right += 1
                else:
                    print False

        print "...................................................................................................."
        print k, "iteration"
        print "...................................................................................................."
        # Running per-class and total averages over the folds seen so far.
        G1 = G1 + 1.0 * first / first_sum
        S = S + 1.0 * second / second_sum
        G2 = G2 + 1.0 * third / third_sum
        Total = Total + 1.0 * right / X_test.shape[0]
        print "class G1:", G1 / k
        print "class S:", S / k
        print "class G2:", G2 / k
        print "class total:", Total / k
        k += 1

    print "...................................................................................................."
    print "Final Result:"
    print "...................................................................................................."
    print "class G1:", G1 / split_num
    print "class S:", S / split_num
    print "class G2:", G2 / split_num
    print "class total:", Total / split_num
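# Aside: when each test sequence has a single time step, the list-based
# argmax pattern `tmp_list.index(max(tmp_list))` used above is equivalent
# to a direct numpy argmax; for longer sequences, max() would compare the
# per-class probability lists lexicographically, which is likely not
# intended. A sketch with made-up probabilities:
import numpy as np

guess = np.array([[0.2, 0.7, 0.1]])  # predict_proba output, shape (1, 3)
tmp_list = guess.T.tolist()
assert tmp_list.index(max(tmp_list)) == np.argmax(guess) == 1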
def RNN_leave_one_cross_validation(seq, targets, n_updates=250, n_seq=182):
    """ Leave-one-out cross-validation for the softmax RNN. """
    length = len(seq)
    right, first, second, third = 0, 0, 0, 0
    false_list = []

    for k in range(0, length):
        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step
        np.random.seed(0)

        # Hold out example k, train on the rest.
        train_sample = copy.deepcopy(seq)
        train_label = copy.deepcopy(targets)
        test_sample = seq[k]
        train_sample = np.delete(train_sample, k, 0)
        train_label = np.delete(train_label, k, 0)
        train_sample = [i for i in train_sample]
        train_label = [i for i in train_label]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation='tanh', output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                           s=model.rnn.y_pred,
                           costs=[model.rnn.loss(model.y),
                                  model.rnn.errors(model.y)],
                           h=model.rnn.h)
        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        guess = model.predict_proba(test_sample)
        tmp_list = np.ndarray.tolist(guess.T)
        # print tmp_list
        if targets[k][0] == 0:
            if tmp_list.index(max(tmp_list)) == targets[k][0]:
                print k, True
                first += 1
                right += 1
            else:
                print k, False
                false_list.append(k)
        elif targets[k][0] == 1:
            if tmp_list.index(max(tmp_list)) == targets[k][0]:
                print k, True
                second += 1
                right += 1
            else:
                print k, False
                false_list.append(k)
        else:
            if tmp_list.index(max(tmp_list)) == targets[k][0]:
                print k, True
                third += 1
                right += 1
            else:
                print k, False
                false_list.append(k)

    # Hard-coded class sizes: 59 + 58 + 65 = 182 examples (n_seq).
    print "class G1:", 1.0 * first / 59
    print "class S:", 1.0 * second / 58
    print "class G2:", 1.0 * third / 65
    print "class total:", 1.0 * right / length
    print false_list