# Shared imports assumed by the snippets below (each snippet originally
# lived in its own file).  MetaRNN, hf_optimizer and SequenceDataset come
# from the accompanying rnn.py / hf.py (theano-hf style) modules; helpers
# such as PrepareData, clean_text and evaluation are project-local.
import copy
import os
import pickle

import numpy
import numpy as np
import pandas as pd
import theano
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold


def test_Oscillator(n_updates=50):
    """ Test RNN with real-valued outputs: reconstruct a full cosine/sine
    pair from a decaying cosine cue on the first half of the input. """
    n_hidden = 250
    n_in = 1
    n_out = 2
    n_steps = 200
    n_seq = 1
    fracZero = 2

    np.random.seed(np.random.randint(200) + 52)
    baseFreq = np.linspace(0, np.pi * 4, n_steps)

    # input: a cosine cue that decays linearly to zero over the first
    # half of the sequence, then silence
    seq = np.zeros((n_seq, n_steps, n_in))
    half = n_steps // fracZero
    seq[:, :half, 0] = np.tile(np.linspace(1, 0, num=half) *
                               np.cos(baseFreq[:half]), (n_seq, 1))

    # targets: the full cosine and sine waves
    targets = np.zeros((n_seq, n_steps, n_out))
    targets[:, :, 0] = np.tile(np.cos(baseFreq), (n_seq, 1))
    targets[:, :, 1] = np.tile(np.sin(baseFreq), (n_seq, 1))

    # SequenceDataset wants a list of sequences
    # this allows them to be different lengths, but here they're not
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=2)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=2)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='cappedrelu')

    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y)], h=model.rnn.h)

    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')
    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])

    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')

    return model
def test_real(n_updates=100):
    """ Test RNN with real-valued outputs. """
    n_hidden = 10
    n_in = 5
    n_out = 3
    n_steps = 10
    n_seq = 1000

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))

    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2

    targets += 0.01 * np.random.standard_normal(targets.shape)

    # SequenceDataset wants a list of sequences
    # this allows them to be different lengths, but here they're not
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=100)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=20)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh')

    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y)], h=model.rnn.h)

    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')
    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])

    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')
def TrainEM(train_scores, train_vad_ts, train_features_ts):
    # prepare data for training
    [input_matrix, target_matrix] = PrepareData(train_vad_ts,
                                                train_features_ts)
    n_in = input_matrix.shape[2]
    n_out = target_matrix.shape[2]
    n_hidden = 10

    not_converged = True
    while not_converged:
        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        learning_rate=0.001, learning_rate_decay=0.999,
                        n_epochs=400, activation='tanh')
        model.fit(input_matrix, target_matrix, validation_frequency=1000)
        # NOTE: as originally written this loop never terminates -- nothing
        # ever set not_converged to False.  A real convergence test belongs
        # here (see the sketch after this function).
        not_converged = False
    return model
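# A minimal sketch (not from the original source) of how TrainEM's loop
# could actually converge: keep refitting until the held-out loss stops
# improving.  `validation_loss` is a hypothetical callable standing in for
# whatever metric the original code intended; tol and max_rounds are
# likewise assumptions.
def TrainEM_with_stopping(input_matrix, target_matrix, validation_loss,
                          tol=1e-4, max_rounds=10):
    n_in = input_matrix.shape[2]
    n_out = target_matrix.shape[2]
    model = MetaRNN(n_in=n_in, n_hidden=10, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=400, activation='tanh')
    prev = float('inf')
    for _ in range(max_rounds):
        model.fit(input_matrix, target_matrix, validation_frequency=1000)
        loss = validation_loss(model)
        if prev - loss < tol:   # stop once the improvement is negligible
            break
        prev = loss
    return model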
def __init__(self, word2vec_path, label_path, model_path, n_in, n_hidden):
    self.n_in = n_in
    self.n_hidden = n_hidden

    #
    # Initialize the label vocabulary (word <-> label ID)
    #
    self.word2label = {}
    self.label2word = {}
    with open(label_path, 'r', encoding='UTF-8') as file:
        for line in file:
            lines = line.strip('\n').split('\t')
            label = lines[0]
            word = lines[1]
            self.word2label[word] = label
            self.label2word[label] = word
    self.n_out = len(self.label2word)

    #
    # Initialize the word2vec table (each feature vector has 200 dims)
    #
    self.model = {}
    with open(word2vec_path) as fin:
        for line in fin:
            items = line.replace('\r', '').replace('\n', '').split(' ')
            if len(items) < 10:
                continue
            word = items[0]
            vect = numpy.array([float(i) for i in items[1:]])
            if vect.shape[0] != 200:
                print(vect)
            self.model[word] = vect

    #
    # Initialize the RNN model and restore its parameters from a saved
    # hf_optimizer checkpoint
    #
    self.RNN = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=self.n_out,
                       activation='tanh', output_type='softmax',
                       use_symbolic_softmax=True)
    save = pickle.load(open(model_path, 'rb'), encoding='latin1')
    cg_last_x, best, lambda_, first_iteration, init_p = save
    for i, j in zip(self.RNN.rnn.params, init_p):
        i.set_value(j)

    inputs = [self.RNN.x, self.RNN.y]
    cost = self.RNN.rnn.loss(self.RNN.y)
    self.f_pred = theano.function(inputs, cost, on_unused_input='ignore')
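# Hypothetical usage of the class this __init__ belongs to.  The class name
# `SequenceScorer` is invented here, and the last line assumes label IDs are
# stringified class indices; everything else mirrors the fields set above.
scorer = SequenceScorer('vectors.txt', 'labels.txt', 'model.pkl',
                        n_in=200, n_hidden=100)
words = ['hello', 'world']
x = numpy.array([scorer.model[w] for w in words if w in scorer.model])
probs = scorer.RNN.predict_proba(x)     # per-step label distribution
labels = [scorer.label2word[str(t)] for t in probs.argmax(axis=1)]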
def HFTest(seq, targets, t_seq, t_targets, n_hidden=10, n_updates=250):
    """ Test RNN with hessian free optimization """
    n_in = 2
    n_out = 2
    n_classes = 10  # unused in this test

    # SequenceDataset wants a list of sequences
    # this allows them to be different lengths, but here they're not
    seq = [i for i in seq]
    targets = [i for i in targets]
    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=500)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=100)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh', output_type='softmax',
                    use_symbolic_softmax=True)

    # optimizes negative log likelihood
    # but also reports zero-one error
    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y),
                              model.rnn.errors(model.y)], h=model.rnn.h)

    # track the zero-one error on the test set after every HF update
    # (the original compared raw probabilities to labels, which never works;
    # take the argmax over classes instead)
    error_updates = []
    for i in range(n_updates):
        opt.train(gradient_dataset, cg_dataset, num_updates=1)
        errors = 0
        for t in range(len(t_seq)):
            guess = model.predict_proba(t_seq[t])
            errors += (guess.argmax(axis=1) != t_targets[t]).sum()
        error_updates.append(errors)
        print(i)
    return (error_updates, model)
# (The lag-threshold target construction quoted out above belongs to a
# disabled variant of this experiment; the live code resumes here.)
print("Convert to SequenceDataset ...")
gradient_dataset = SequenceDataset([x_seq_list, y_seq_list],
                                   batch_size=None, number_batches=100)
cg_dataset = SequenceDataset([x_seq_list, y_seq_list],
                             batch_size=None, number_batches=50)

model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                n_epochs=n_epochs, activation='tanh',
                output_type='softmax', use_symbolic_softmax=True,
                L2_reg=0.0001)

# optimizes negative log likelihood
# but also reports zero-one error
# (this project's modified hf_optimizer takes an extra n_in keyword)
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                   s=model.rnn.y_pred, n_in=n_in,
                   costs=[model.rnn.loss(model.y),
                          model.rnn.errors(model.y)], h=model.rnn.h)

print("Training ...")
opt.train(gradient_dataset, cg_dataset, initial_lambda=1.0,
          num_updates=n_updates, save_progress='save_param')

seqs = range(10)
plt.close('all')
[n_seq, n_steps, n_in] = seq.shape
n_hidden = 2 * n_in
n_out = 24

seqs = [i for i in seq]       # was "for i in seqs" -- a typo in the original
targets = [i for i in target]

gradient_dataset = SequenceDataset([seqs, targets], batch_size=None,
                                   number_batches=80)
cg_dataset = SequenceDataset([seqs, targets], batch_size=None,
                             number_batches=20)

# n_mul is apparently this project's modified MetaRNN keyword for the
# hidden-layer size
model = MetaRNN(n_in=n_in, n_mul=n_hidden, n_out=n_out, activation='tanh')

opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                   s=model.rnn.y_pred,
                   costs=[model.rnn.loss(model.y)], h=model.rnn.h)
opt.train(gradient_dataset, cg_dataset, num_updates=30)

path = '/root/chentian/share/201605/one_ped1/%d' % index
os.mkdir(path)
path0 = path + '/predict.csv'
f3 = open(path0, "a")   # file() is Python 2 only; open() works everywhere
for seq_num in range(0, n_seq):
    pass  # (loop body truncated in the original excerpt)
# Creating parameters for training RNN
n_hidden = 5   # M
n_in = 3       # D
n_out = 1      # K
n_steps = 1    # the length of each sequence
n_seq = len(food_sel_scaled_train)  # the number of datapoints (i.e. sequences)

# Creating input and output arrays for training RNN
rating = np.array(food_sel_scaled_train[
    ['rating', 'price.tier', 'stats.checkinsCount']
]).reshape(n_seq, n_steps, n_in)
score = np.array(food_sel_scaled_train['SCORE']).reshape(n_seq, n_steps,
                                                         n_out)

# Creating the model and feeding it with the training data set
model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                learning_rate=0.001, learning_rate_decay=0.999,
                n_epochs=50, activation='tanh')
model.fit(rating, score, validation_frequency=5000)

guess = model.predict(rating.reshape(len(rating), n_in))
scores_pred = pd.DataFrame(guess)
scores_pred.columns = ['predictions']
scores_pred.predictions.plot(
    kind='hist', bins=20, figsize=(6, 4), grid=True,
    title="Histogram of predicted DOH normalized Scores (IS)", alpha=0.8)
food_sel_scaled_train.SCORE.plot(
    kind='hist', bins=20, figsize=(6, 4), grid=True,
    title="Histogram of actual DOH normalized Scores (IS)", alpha=0.8)
a = clean_text.clean_text(line)
a = a.split(' ')
for i in range(len(a)):
    word = a[i]
    if word not in word2label:
        word2label[word] = labelCount
        label2word[labelCount] = word
        labelCount += 1

n_hidden = 100
n_in = word_vec_len
n_out = len(label2word)

RNN = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
              activation='tanh', output_type='softmax',
              use_symbolic_softmax=True)

# restore the parameters from a saved hf_optimizer checkpoint
save = pickle.load(open(model_path, 'rb'), encoding='latin1')
cg_last_x, best, lambda_, first_iteration, init_p = save
for i, j in zip(RNN.rnn.params, init_p):
    i.set_value(j)

inputs = [RNN.x, RNN.y]
#costs = [model.rnn.loss(model.y), model.rnn.errors(model.y)]
cost = RNN.rnn.loss(RNN.y)
f_pred = theano.function(inputs, cost, on_unused_input='ignore')

test_str = "i am fire thin q"
def RNN_leave_one_cross_validation(seq, targets, n_updates=250, n_seq=182):
    """ Leave-one-out cross validation of the RNN with softmax outputs. """
    length = len(seq)
    right, first, second, third = 0, 0, 0, 0
    false_list = []
    for k in range(0, length):
        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step

        np.random.seed(0)

        # hold out sequence k, train on the rest
        train_sample = copy.deepcopy(seq)
        train_label = copy.deepcopy(targets)
        test_sample = seq[k]
        train_sample = np.delete(train_sample, k, 0)
        train_label = np.delete(train_label, k, 0)
        train_sample = [i for i in train_sample]
        train_label = [i for i in train_label]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation='tanh', output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(
            p=model.rnn.params, inputs=[model.x, model.y],
            s=model.rnn.y_pred,
            costs=[model.rnn.loss(model.y), model.rnn.errors(model.y)],
            h=model.rnn.h)

        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        guess = model.predict_proba(test_sample)
        tmp_list = np.ndarray.tolist(guess.T)
        predicted = tmp_list.index(max(tmp_list))
        if predicted == targets[k][0]:
            print("%s True" % k)
            right += 1
            if targets[k][0] == 0:
                first += 1
            elif targets[k][0] == 1:
                second += 1
            else:
                third += 1
        else:
            print("%s False" % k)
            false_list.append(k)

    # per-class sizes: 59 G1, 58 S, 65 G2 sequences (59 + 58 + 65 = 182)
    print("class G1: %s" % (1.0 * first / 59))
    print("class S: %s" % (1.0 * second / 58))
    print("class G2: %s" % (1.0 * third / 65))
    print("class total: %s" % (1.0 * right / length))
    print(false_list)
def test_binary(multiple_out=False, n_updates=250):
    """ Test RNN with binary outputs. """
    n_hidden = 10
    n_in = 5
    if multiple_out:
        n_out = 2
    else:
        n_out = 1
    n_steps = 10
    n_seq = 100

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out), dtype='int32')

    # whether lag 1 (dim 3) is greater than lag 2 (dim 0)
    targets[:, 2:, 0] = np.cast[np.int32](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    if multiple_out:
        # whether product of lag 1 (dim 4) and lag 1 (dim 2)
        # is greater than lag 2 (dim 0)
        targets[:, 2:, 1] = np.cast[np.int32](
            (seq[:, 1:-1, 4] * seq[:, 1:-1, 2]) > seq[:, :-2, 0])

    # SequenceDataset wants a list of sequences
    # this allows them to be different lengths, but here they're not
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=500)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=100)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh', output_type='binary')

    # optimizes negative log likelihood
    # but also reports zero-one error
    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y),
                              model.rnn.errors(model.y)], h=model.rnn.h)

    # using settings of initial_lambda and mu given in Nicolas' RNN example
    # seems to do a little worse than the default
    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    seqs = range(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)
        true_targets = plt.step(range(n_steps), targets[seq_num],
                                marker='o')

        guess = model.predict_proba(seq[seq_num])
        guessed_targets = plt.step(range(n_steps), guess)
        plt.setp(guessed_targets, linestyle='--', marker='d')
        for i, x in enumerate(guessed_targets):
            x.set_color(true_targets[i].get_color())
        ax2.set_ylim((-0.1, 1.1))
        ax2.set_title('solid: true output, dashed: model output (prob)')
def test_softmax(n_updates=250):
    """ Test RNN with softmax outputs. """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step

    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps), dtype='int32')

    thresh = 0.5
    # if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 1
    # if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 2
    # if lag 2 (dim 0) - thresh <= lag 1 (dim 3) <= lag 2 (dim 0) + thresh
    # class 0
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
    #targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])

    # SequenceDataset wants a list of sequences
    # this allows them to be different lengths, but here they're not
    seq = [i for i in seq]
    targets = [i for i in targets]

    gradient_dataset = SequenceDataset([seq, targets], batch_size=None,
                                       number_batches=500)
    cg_dataset = SequenceDataset([seq, targets], batch_size=None,
                                 number_batches=100)

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    activation='tanh', output_type='softmax',
                    use_symbolic_softmax=True)

    # optimizes negative log likelihood
    # but also reports zero-one error
    opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                       s=model.rnn.y_pred,
                       costs=[model.rnn.loss(model.y),
                              model.rnn.errors(model.y)], h=model.rnn.h)

    # using settings of initial_lambda and mu given in Nicolas' RNN example
    # seems to do a little worse than the default
    opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

    seqs = range(10)

    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input')
        ax2 = plt.subplot(212)

        # blue line will represent true classes
        true_targets = plt.step(range(n_steps), targets[seq_num],
                                marker='o')

        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
def RNN_k_fold_cross_validation(sample, label, n_updates=20):
    X = sample
    y = label
    kf = KFold(n_splits=5, shuffle=True)
    split_num = kf.get_n_splits(X)
    k = 1
    G1, G2, S, Total = 0, 0, 0, 0
    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        n_hidden = 10
        n_in = 40
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step

        np.random.seed(0)

        train_sample = [i for i in X_train]
        train_label = [i for i in y_train]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation='tanh', output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(
            p=model.rnn.params, inputs=[model.x, model.y],
            s=model.rnn.y_pred,
            costs=[model.rnn.loss(model.y), model.rnn.errors(model.y)],
            h=model.rnn.h)

        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        # per-class hit counts and class sizes for this fold
        right, first, second, third = 0, 0, 0, 0
        first_sum, second_sum, third_sum = 0, 0, 0
        for count in range(0, X_test.shape[0]):
            guess = model.predict_proba(X_test[count])
            tmp_list = np.ndarray.tolist(guess.T)
            predicted = tmp_list.index(max(tmp_list))
            correct = (predicted == y_test[count][0])
            if y_test[count][0] == 0:
                first_sum += 1
                first += correct
            elif y_test[count][0] == 1:
                second_sum += 1
                second += correct
            else:
                third_sum += 1
                third += correct
            right += correct
            print(correct)

        print("." * 100)
        print("%s iteration" % k)
        print("." * 100)

        G1 = G1 + 1.0 * first / first_sum
        S = S + 1.0 * second / second_sum
        G2 = G2 + 1.0 * third / third_sum
        Total = Total + 1.0 * right / X_test.shape[0]

        print("class G1: %s" % (G1 / k))
        print("class S: %s" % (S / k))
        print("class G2: %s" % (G2 / k))
        print("class total: %s" % (Total / k))
        k += 1

    print("." * 100)
    print("Final Result:")
    print("." * 100)
    print("class G1: %s" % (G1 / split_num))
    print("class S: %s" % (S / split_num))
    print("class G2: %s" % (G2 / split_num))
    print("class total: %s" % (Total / split_num))
def RNN_Evaluation(sample, label, n_hidden=10, activation_func='tanh',
                   n_updates=20, k_fold=5):
    X = sample
    y = label
    kf = KFold(n_splits=k_fold, shuffle=True)
    split_num = kf.get_n_splits(X)
    k = 1
    G1, G2, S, Total = 0, 0, 0, 0
    (AUC, p, r, f1) = (0, 0, 0, 0)
    for train_index, test_index in kf.split(X):
        # print("TRAIN:", train_index, "TEST:", test_index)
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        n_in = 20
        n_classes = 3
        n_out = n_classes  # restricted to single softmax per time step

        np.random.seed(0)

        train_sample = [i for i in X_train]
        train_label = [i for i in y_train]

        gradient_dataset = SequenceDataset([train_sample, train_label],
                                           batch_size=None,
                                           number_batches=500)
        cg_dataset = SequenceDataset([train_sample, train_label],
                                     batch_size=None, number_batches=100)

        model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                        activation=activation_func, output_type='softmax',
                        use_symbolic_softmax=True)

        opt = hf_optimizer(
            p=model.rnn.params, inputs=[model.x, model.y],
            s=model.rnn.y_pred,
            costs=[model.rnn.loss(model.y), model.rnn.errors(model.y)],
            h=model.rnn.h)

        opt.train(gradient_dataset, cg_dataset, num_updates=n_updates)

        # one-hot encode the test labels for the evaluation helper
        y_test_vector = np.zeros((X_test.shape[0], 3), dtype='int64')
        for count in range(0, X_test.shape[0]):
            if y_test[count][0] == 0:
                y_test_vector[count][0] = 1
            elif y_test[count][0] == 1:
                y_test_vector[count][1] = 1
            else:
                y_test_vector[count][2] = 1

        (AUC_k, p_k, r_k, f1_k) = evaluation.evaluate(model, X_test,
                                                      y_test_vector, 0.8)
        print("%s / %s Iteration: AUC: %s, Prec: %s, Rec: %s, F1: %s"
              % (k, k_fold, AUC_k, p_k, r_k, f1_k))
        AUC = AUC + AUC_k
        p = p + p_k
        r = r + r_k
        f1 = f1 + f1_k
        print("Average: AUC: %s, Prec: %s, Rec: %s, F1: %s"
              % (AUC / k, p / k, r / k, f1 / k))
        k += 1

    AUC = AUC / k_fold
    p = p / k_fold
    r = r / k_fold
    f1 = f1 / k_fold
    return AUC, p, r, f1
# (The lag-threshold target construction quoted out above duplicates the
# disabled variant shown earlier; the live code resumes here.)
y_seq_list = x_seq_list
#gradient_dataset = SequenceDataset([x_seq_list, y_seq_list], batch_size=None, number_batches=100)
#cg_dataset = SequenceDataset([x_seq_list, y_seq_list], batch_size=None, number_batches=50)

print("Convert to SequenceDataset ...")
gradient_dataset = SequenceDataset([x_seq_list, x_seq_list],
                                   batch_size=None, number_batches=500)
cg_dataset = SequenceDataset([x_seq_list, x_seq_list],
                             batch_size=None, number_batches=1000)

model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                n_epochs=n_epochs, activation='tanh',
                output_type='softmax', use_symbolic_softmax=True,
                L2_reg=0.0001)

# optimizes negative log likelihood
# but also reports zero-one error
# (this project's modified hf_optimizer takes extra keyword args:
# n_in, word2vec, word2label)
opt = hf_optimizer(p=model.rnn.params, inputs=[model.x, model.y],
                   s=model.rnn.y_pred, n_in=n_in,
                   word2vec=word2vec, word2label=word2label,
                   costs=[model.rnn.loss(model.y),
                          model.rnn.errors(model.y)], h=model.rnn.h)

print("Training ...")
opt.train(gradient_dataset, cg_dataset, num_updates=n_updates,
          save_progress='save_param')

seqs = range(10)
# (An hf_optimizer training variant is quoted out in the original here;
# the live gradient-descent path follows.)
model = MetaRNN(n_in=n_in, n_mul=n_hidden, n_out=n_out,
                learning_rate=0.001, learning_rate_decay=0.999,
                n_epochs=n_epochs, L1_reg=0.005, activation='tanh',
                output_type='real', index=index)
model.fit(seqs, targets, validation_frequency=1000)
print("training over")

path = '/root/chentian/share/201605/ped2'
# os.makedirs(path)
path0 = path + '/predict.csv'
f3 = open(path0, "a")   # file() is Python 2 only; open() works everywhere
for k in range(0, 3000):
    guess = model.predict(test_seqs[k])
    np.savetxt(f3, guess, delimiter=',')
f3.close()