def evaluate_lenet5(learning_rate=0.5, n_epochs=2000, batch_size=500, emb_size=300, hidden_size=300, L2_weight=0.0001, para_len_limit=700, q_len_limit=40): model_options = locals().copy() print "model options", model_options rootPath='/mounts/data/proj/wenpeng/Dataset/SQuAD/'; rng = numpy.random.RandomState(23455) train_para_list, train_Q_list, train_label_list, train_para_mask, train_mask, word2id, train_feature_matrixlist=load_train(para_len_limit, q_len_limit) train_size=len(train_para_list) if train_size!=len(train_Q_list) or train_size!=len(train_label_list) or train_size!=len(train_para_mask): print 'train_size!=len(Q_list) or train_size!=len(label_list) or train_size!=len(para_mask)' exit(0) test_para_list, test_Q_list, test_para_mask, test_mask, overall_vocab_size, overall_word2id, test_text_list, q_ansSet_list, test_feature_matrixlist= load_dev_or_test(word2id, para_len_limit, q_len_limit) test_size=len(test_para_list) if test_size!=len(test_Q_list) or test_size!=len(test_mask) or test_size!=len(test_para_mask): print 'test_size!=len(test_Q_list) or test_size!=len(test_mask) or test_size!=len(test_para_mask)' exit(0) id2word = {y:x for x,y in overall_word2id.iteritems()} word2vec=load_word2vec() rand_values=random_value_normal((overall_vocab_size+1, emb_size), theano.config.floatX, numpy.random.RandomState(1234)) # rand_values[0]=numpy.array(numpy.zeros(emb_size),dtype=theano.config.floatX) rand_values=load_word2vec_to_init(rand_values, id2word, word2vec) embeddings=theano.shared(value=rand_values, borrow=True) # allocate symbolic variables for the data # index = T.lscalar() paragraph = T.imatrix('paragraph') questions = T.imatrix('questions') labels = T.imatrix('labels') para_mask=T.fmatrix('para_mask') q_mask=T.fmatrix('q_mask') extraF=T.ftensor3('extraF') # should be in shape (batch, wordsize, 3) ###################### # BUILD ACTUAL MODEL # ###################### print '... 
building the model' # Reshape matrix of rasterized images of shape (batch_size,28*28) # to a 4D tensor, compatible with our LeNetConvPoolLayer #layer0_input = x.reshape(((batch_size*4), 1, ishape[0], ishape[1])) paragraph_input = embeddings[paragraph.flatten()].reshape((paragraph.shape[0], paragraph.shape[1], emb_size)).transpose((0, 2,1)) # (batch_size, emb_size, maxparalen) # # # BdGRU(rng, str(0), shape, X, mask, is_train = 1, batch_size = 1, p = 0.5) # U1, W1, b1=create_GRU_para(rng, emb_size, hidden_size) U1_b, W1_b, b1_b=create_GRU_para(rng, emb_size, hidden_size) paragraph_para=[U1, W1, b1, U1_b, W1_b, b1_b] paragraph_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=paragraph_input, Mask=para_mask, hidden_dim=hidden_size,U=U1,W=W1,b=b1,Ub=U1_b,Wb=W1_b,bb=b1_b) para_reps=paragraph_model.output_tensor #(batch, emb, para_len) Qs_emb = embeddings[questions.flatten()].reshape((questions.shape[0], questions.shape[1], emb_size)).transpose((0, 2,1)) #(#questions, emb_size, maxsenlength) UQ, WQ, bQ=create_GRU_para(rng, emb_size, hidden_size) UQ_b, WQ_b, bQ_b=create_GRU_para(rng, emb_size, hidden_size) Q_para=[UQ, WQ, bQ, UQ_b, WQ_b, bQ_b] questions_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=Qs_emb, Mask=q_mask, hidden_dim=hidden_size, U=UQ,W=WQ,b=bQ, Ub=UQ_b, Wb=WQ_b, bb=bQ_b) questions_reps=questions_model.output_sent_rep_maxpooling.reshape((batch_size, 1, hidden_size)) #(batch, 2*out_size) #questions_reps=T.repeat(questions_reps, para_reps.shape[2], axis=1) #attention distributions W_a1 = create_ensemble_para(rng, hidden_size, hidden_size)# init_weights((2*hidden_size, hidden_size)) W_a2 = create_ensemble_para(rng, hidden_size, hidden_size) U_a = create_ensemble_para(rng, 2, hidden_size+3) # 3 extra features norm_W_a1=normalize_matrix(W_a1) norm_W_a2=normalize_matrix(W_a2) norm_U_a=normalize_matrix(U_a) LR_b = theano.shared(value=numpy.zeros((2,), dtype=theano.config.floatX), # @UndefinedVariable name='LR_b', borrow=True) attention_paras=[W_a1, W_a2, U_a, LR_b] transformed_para_reps=T.tanh(T.dot(para_reps.transpose((0, 2,1)), norm_W_a2)) transformed_q_reps=T.tanh(T.dot(questions_reps, norm_W_a1)) #transformed_q_reps=T.repeat(transformed_q_reps, transformed_para_reps.shape[1], axis=1) add_both=0.5*(transformed_para_reps+transformed_q_reps) prior_att=T.concatenate([add_both, normalize_matrix(extraF)], axis=2) #prior_att=T.concatenate([transformed_para_reps, transformed_q_reps], axis=2) valid_indices=para_mask.flatten().nonzero()[0] layer3=LogisticRegression(rng, input=prior_att.reshape((batch_size*prior_att.shape[1], hidden_size+3)), n_in=hidden_size+3, n_out=2, W=norm_U_a, b=LR_b) #error =layer3.negative_log_likelihood(labels.flatten()[valid_indices]) error = -T.mean(T.log(layer3.p_y_given_x)[valid_indices, labels.flatten()[valid_indices]])#[T.arange(y.shape[0]), y]) distributions=layer3.p_y_given_x[:,-1].reshape((batch_size, para_mask.shape[1])) #distributions=layer3.y_pred.reshape((batch_size, para_mask.shape[1])) masked_dis=distributions*para_mask ''' strength = T.tanh(T.dot(prior_att, norm_U_a)) #(batch, #word, 1) distributions=debug_print(strength.reshape((batch_size, paragraph.shape[1])), 'distributions') para_mask=para_mask masked_dis=distributions*para_mask # masked_label=debug_print(labels*para_mask, 'masked_label') # error=((masked_dis-masked_label)**2).mean() label_mask=T.gt(labels,0.0) neg_label_mask=T.lt(labels,0.0) dis_masked=distributions*label_mask remain_dis_masked=distributions*neg_label_mask ans_size=T.sum(label_mask) non_ans_size=T.sum(neg_label_mask) 
pos_error=T.sum((dis_masked-label_mask)**2)/ans_size neg_error=T.sum((remain_dis_masked-(-neg_label_mask))**2)/non_ans_size error=pos_error+0.5*neg_error #(ans_size*1.0/non_ans_size)* ''' # def AttentionLayer(q_rep, ext_M): # theano_U_a=debug_print(norm_U_a, 'norm_U_a') # prior_att=debug_print(T.nnet.sigmoid(T.dot(q_rep, norm_W_a1).reshape((1, hidden_size)) + T.dot(paragraph_model.output_matrix.transpose(), norm_W_a2)), 'prior_att') # f __name__ == '__main__': # prior_att=T.concatenate([prior_att, ext_M], axis=1) # # strength = debug_print(T.tanh(T.dot(prior_att, theano_U_a)), 'strength') #(#word, 1) # return strength.transpose() #(1, #words) # distributions, updates = theano.scan( # AttentionLayer, # sequences=[questions_reps,extraF] ) # distributions=debug_print(distributions.reshape((questions.shape[0],paragraph.shape[0])), 'distributions') # labels=debug_print(labels, 'labels') # label_mask=T.gt(labels,0.0) # neg_label_mask=T.lt(labels,0.0) # dis_masked=distributions*label_mask # remain_dis_masked=distributions*neg_label_mask # pos_error=((dis_masked-1)**2).mean() # neg_error=((remain_dis_masked-(-1))**2).mean() # error=pos_error+(T.sum(label_mask)*1.0/T.sum(neg_label_mask))*neg_error #params = layer3.params + layer2.params + layer1.params+ [conv_W, conv_b] params = [embeddings]+paragraph_para+Q_para+attention_paras L2_reg =L2norm_paraList([embeddings,U1, W1, U1_b, W1_b,UQ, WQ, UQ_b, WQ_b, W_a1, W_a2, U_a]) #L2_reg = L2norm_paraList(params) cost=error#+L2_weight*L2_reg accumulator=[] for para_i in params: eps_p=numpy.zeros_like(para_i.get_value(borrow=True),dtype=theano.config.floatX) accumulator.append(theano.shared(eps_p, borrow=True)) # create a list of gradients for all model parameters grads = T.grad(cost, params) updates = [] for param_i, grad_i, acc_i in zip(params, grads, accumulator): # print grad_i.type acc = acc_i + T.sqr(grad_i) updates.append((param_i, param_i - learning_rate * grad_i / (T.sqrt(acc)+1e-8))) #AdaGrad updates.append((acc_i, acc)) train_model = theano.function([paragraph, questions,labels, para_mask, q_mask, extraF], error, updates=updates,on_unused_input='ignore') test_model = theano.function([paragraph, questions,para_mask, q_mask, extraF], masked_dis, on_unused_input='ignore') ############### # TRAIN MODEL # ############### print '... training' # early-stopping parameters patience = 500000000000000 # look as this many examples regardless best_params = None best_validation_loss = numpy.inf best_iter = 0 test_score = 0. 
start_time = time.time() mid_time = start_time past_time= mid_time epoch = 0 done_looping = False #para_list, Q_list, label_list, mask, vocab_size=load_train() n_train_batches=train_size/batch_size # remain_train=train_size%batch_size train_batch_start=list(numpy.arange(n_train_batches)*batch_size)+[train_size-batch_size] n_test_batches=test_size/batch_size # remain_test=test_size%batch_size test_batch_start=list(numpy.arange(n_test_batches)*batch_size)+[test_size-batch_size] max_exact_acc=0.0 cost_i=0.0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 #shuffle(train_batch_start) iter_accu=0 for para_id in train_batch_start: # iter means how many batches have been runed, taking into loop iter = (epoch - 1) * n_train_batches + iter_accu +1 iter_accu+=1 # haha=para_mask[para_id:para_id+batch_size] # print haha # for i in range(batch_size): # print len(haha[i]) cost_i+= train_model( np.asarray(train_para_list[para_id:para_id+batch_size], dtype='int32'), np.asarray(train_Q_list[para_id:para_id+batch_size], dtype='int32'), np.asarray(train_label_list[para_id:para_id+batch_size], dtype='int32'), np.asarray(train_para_mask[para_id:para_id+batch_size], dtype=theano.config.floatX), np.asarray(train_mask[para_id:para_id+batch_size], dtype=theano.config.floatX), np.asarray(train_feature_matrixlist[para_id:para_id+batch_size], dtype=theano.config.floatX)) #print iter if iter%10==0: print 'Epoch ', epoch, 'iter '+str(iter)+' average cost: '+str(cost_i/iter), 'uses ', (time.time()-past_time)/60.0, 'min' print 'Testing...' past_time = time.time() exact_match=0.0 q_amount=0 for test_para_id in test_batch_start: distribution_matrix=test_model( np.asarray(test_para_list[test_para_id:test_para_id+batch_size], dtype='int32'), np.asarray(test_Q_list[test_para_id:test_para_id+batch_size], dtype='int32'), np.asarray(test_para_mask[test_para_id:test_para_id+batch_size], dtype=theano.config.floatX), np.asarray(test_mask[test_para_id:test_para_id+batch_size], dtype=theano.config.floatX), np.asarray(test_feature_matrixlist[test_para_id:test_para_id+batch_size], dtype=theano.config.floatX)) # print distribution_matrix test_para_wordlist_list=test_text_list[test_para_id:test_para_id+batch_size] para_gold_ansset_list=q_ansSet_list[test_para_id:test_para_id+batch_size] paralist_extra_features=test_feature_matrixlist[test_para_id:test_para_id+batch_size] sub_para_mask=test_para_mask[test_para_id:test_para_id+batch_size] para_len=len(test_para_wordlist_list[0]) if para_len!=len(distribution_matrix[0]): print 'para_len!=len(distribution_matrix[0]):', para_len, len(distribution_matrix[0]) exit(0) # q_size=len(distribution_matrix) q_amount+=batch_size # print q_size # print test_para_word_list for q in range(batch_size): #for each question # if len(distribution_matrix[q])!=len(test_label_matrix[q]): # print 'len(distribution_matrix[q])!=len(test_label_matrix[q]):', len(distribution_matrix[q]), len(test_label_matrix[q]) # else: # ss=len(distribution_matrix[q]) # combine_list=[] # for ii in range(ss): # combine_list.append(str(distribution_matrix[q][ii])+'('+str(test_label_matrix[q][ii])+')') # print combine_list # exit(0) # print 'distribution_matrix[q]:',distribution_matrix[q] pred_ans=extract_ansList_attentionList(test_para_wordlist_list[q], distribution_matrix[q], np.asarray(paralist_extra_features[q], dtype=theano.config.floatX), sub_para_mask[q]) q_gold_ans_set=para_gold_ansset_list[q] F1=MacroF1(pred_ans, q_gold_ans_set) exact_match+=F1 # match_amount=len(pred_ans_set & q_gold_ans_set) # # print 
'q_gold_ans_set:', q_gold_ans_set # # print 'pred_ans_set:', pred_ans_set # if match_amount>0: # exact_match+=match_amount*1.0/len(pred_ans_set) exact_acc=exact_match/q_amount if exact_acc> max_exact_acc: max_exact_acc=exact_acc print 'current average F1:', exact_acc, '\t\tmax F1:', max_exact_acc if patience <= iter: done_looping = True break print 'Epoch ', epoch, 'uses ', (time.time()-mid_time)/60.0, 'min' mid_time = time.time() #print 'Batch_size: ', update_freq end_time = time.time() print('Optimization complete.') print('Best validation score of %f %% obtained at iteration %i,'\ 'with test performance %f %%' % (best_validation_loss * 100., best_iter + 1, test_score * 100.)) print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((end_time - start_time) / 60.))
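# A minimal, self-contained NumPy sketch of the AdaGrad rule that the Theano
# updates above implement (acc += grad**2; param -= lr * grad / (sqrt(acc) + eps)).
# It is included only for reference; the names adagrad_step, lr and eps are
# illustrative and do not appear in the model code.
import numpy as np

def adagrad_step(params, grads, accumulators, lr=0.5, eps=1e-8):
    """One AdaGrad update: per-parameter step sizes shrink with the
    accumulated squared gradients, matching the updates list above."""
    new_params, new_accs = [], []
    for p, g, acc in zip(params, grads, accumulators):
        acc = acc + g ** 2                          # running sum of squared gradients
        new_params.append(p - lr * g / (np.sqrt(acc) + eps))
        new_accs.append(acc)
    return new_params, new_accs

# toy usage: a single 2x2 parameter matrix with a constant gradient
w = np.zeros((2, 2))
acc0 = np.zeros_like(w)
g = np.ones_like(w)
(w,), (acc0,) = adagrad_step([w], [g], [acc0])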
def evaluate_lenet5(learning_rate=0.01, n_epochs=2000, batch_size=500, test_batch_size=1000, emb_size=300, hidden_size=300, HL_hidden_size=200, L2_weight=0.0001, train_size=None, test_size=None, batch_size_pred=1000, para_len=60, question_len=20, c_len=7, e_len=2): model_options = locals().copy() print "model options", model_options rootPath='/mounts/work/hs/yin/20161219/'; storePath='/mounts/data/proj/wenpeng/Dataset/SQuAD/' rng = np.random.RandomState(23455) word2id={} word2id['UNK']=0 # use it to pad word2id, train_questions,train_questions_mask,train_paras,train_paras_mask,train_e_ids,train_e_masks,train_c_ids,train_c_masks, train_c_heads,train_c_tails,train_l_heads,train_l_tails,train_e_heads,train_e_tails,train_labels, train_labels_3c=load_SQUAD_hinrich_v2(train_size, para_len, question_len, e_len, c_len, word2id, rootPath+'squadnewtrn.txt') word2id, test_questions,test_questions_mask,test_paras,test_paras_mask,test_e_ids,test_e_masks,test_c_ids,test_c_masks, test_c_heads,test_c_tails,test_l_heads,test_l_tails,test_e_heads,test_e_tails,test_labels, test_labels_3c=load_SQUAD_hinrich_v2(test_size, para_len, question_len, e_len, c_len,word2id, rootPath+'squadnewdev.txt') print 'word2id size for bigger dataset:', len(word2id) word2id, train_questions,train_questions_mask,train_paras,train_paras_mask,train_e_ids,train_e_masks,train_c_ids,train_c_masks, train_c_heads,train_c_tails,train_l_heads,train_l_tails,train_e_heads,train_e_tails,train_labels, train_labels_3c=load_SQUAD_hinrich_v2(train_size, para_len, question_len,e_len, c_len, word2id, rootPath+'squadnewtrn,subset.000.txt') word2id, test_questions,test_questions_mask,test_paras,test_paras_mask,test_e_ids,test_e_masks,test_c_ids,test_c_masks, test_c_heads,test_c_tails,test_l_heads,test_l_tails,test_e_heads,test_e_tails,test_labels, test_labels_3c=load_SQUAD_hinrich_v2(test_size, para_len, question_len, e_len, c_len,word2id, rootPath+'squadnewdev,subset.000.txt') print 'word2id size for smaller dataset:', len(word2id) # if len(train_questions)!=train_size or len(test_questions)!=test_size: # print 'len(questions)!=train_size or len(test_questions)!=test_size:', len(train_questions),train_size,len(test_questions),test_size # exit(0) train_size=len(train_questions) test_size = len(test_questions) train_questions = np.asarray(train_questions, dtype='int32') # print train_questions[:10,:] # exit(0) train_questions_mask = np.asarray(train_questions_mask, dtype=theano.config.floatX) train_paras = np.asarray(train_paras, dtype='int32') train_paras_mask = np.asarray(train_paras_mask, dtype=theano.config.floatX) train_e_ids = np.asarray(train_e_ids, dtype='int32') train_e_masks = np.asarray(train_e_masks, dtype=theano.config.floatX) train_c_ids = np.asarray(train_c_ids, dtype='int32') train_c_masks = np.asarray(train_c_masks, dtype=theano.config.floatX) train_c_heads = np.asarray(train_c_heads, dtype='int32') train_c_tails = np.asarray(train_c_tails, dtype='int32') train_l_heads = np.asarray(train_l_heads, dtype='int32') train_l_tails = np.asarray(train_l_tails, dtype='int32') train_e_heads = np.asarray(train_e_heads, dtype='int32') train_e_tails = np.asarray(train_e_tails, dtype='int32') train_labels = np.asarray(train_labels, dtype='int32') train_labels_3c = np.asarray(train_labels_3c, dtype='int32') test_questions = np.asarray(test_questions, dtype='int32') test_questions_mask = np.asarray(test_questions_mask, dtype=theano.config.floatX) test_paras = np.asarray(test_paras, dtype='int32') test_paras_mask = np.asarray(test_paras_mask, 
dtype=theano.config.floatX) test_e_ids = np.asarray(test_e_ids, dtype='int32') test_e_masks = np.asarray(test_e_masks, dtype=theano.config.floatX) test_c_ids = np.asarray(test_c_ids, dtype='int32') test_c_masks = np.asarray(test_c_masks, dtype=theano.config.floatX) test_c_heads = np.asarray(test_c_heads, dtype='int32') test_c_tails = np.asarray(test_c_tails, dtype='int32') test_l_heads = np.asarray(test_l_heads, dtype='int32') test_l_tails = np.asarray(test_l_tails, dtype='int32') test_e_heads = np.asarray(test_e_heads, dtype='int32') test_e_tails = np.asarray(test_e_tails, dtype='int32') test_labels = np.asarray(test_labels, dtype='int32') overall_vocab_size=len(word2id) print 'train size:', train_size, 'test size:', test_size, 'vocab size:', overall_vocab_size rand_values=random_value_normal((overall_vocab_size+1, emb_size), theano.config.floatX, rng) rand_values[0]=np.array(np.zeros(emb_size),dtype=theano.config.floatX) id2word = {y:x for x,y in word2id.iteritems()} word2vec=load_word2vec() rand_values=load_word2vec_to_init(rand_values, id2word, word2vec) embeddings=theano.shared(value=rand_values, borrow=True) # allocate symbolic variables for the data # index = T.lscalar() para=T.imatrix() #(2*batch, len) para_mask=T.fmatrix() #(2*batch, len) c_ids=T.imatrix() #(2*batch, len) c_mask=T.fmatrix() #(2*batch, len) e_ids=T.imatrix() #(2*batch, len) e_mask=T.fmatrix() #(2*batch, len) c_heads=T.ivector() #batch c_tails=T.ivector() #batch l_heads=T.ivector() #batch l_tails=T.ivector() #batch e_heads=T.ivector() #batch e_tails=T.ivector() #batch q=T.imatrix() #(2*batch, len_q) q_mask=T.fmatrix() #(2*batch, len_q) labels=T.ivector() #batch ###################### # BUILD ACTUAL MODEL # ###################### print '... building the model' true_batch_size = para.shape[0] # U_p, W_p, b_p=create_GRU_para(rng, emb_size, hidden_size) # U_p_b, W_p_b, b_p_b=create_GRU_para(rng, emb_size, hidden_size) # GRU_p_para=[U_p, W_p, b_p, U_p_b, W_p_b, b_p_b] # # U_q, W_q, b_q=create_GRU_para(rng, emb_size, hidden_size) # U_q_b, W_q_b, b_q_b=create_GRU_para(rng, emb_size, hidden_size) # GRU_q_para=[U_q, W_q, b_q, U_q_b, W_q_b, b_q_b] paragraph_input = embeddings[para.flatten()].reshape((true_batch_size, para_len, emb_size)).transpose((0, 2,1)) #(batch, emb_size, para_len) q_input = embeddings[q.flatten()].reshape((true_batch_size, question_len, emb_size)).transpose((0, 2,1)) # (batch, emb_size, question_len) fwd_LSTM_para_dict=create_LSTM_para(rng, emb_size, hidden_size) bwd_LSTM_para_dict=create_LSTM_para(rng, emb_size, hidden_size) paragraph_para=fwd_LSTM_para_dict.values()+ bwd_LSTM_para_dict.values()# .values returns a list of parameters paragraph_model=Bd_LSTM_Batch_Tensor_Input_with_Mask_Concate(paragraph_input, para_mask, hidden_size, fwd_LSTM_para_dict, bwd_LSTM_para_dict) paragraph_reps_tensor3=paragraph_model.output_tensor #(batch, 2*hidden, paralen) # paragraph_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=paragraph_input, Mask=para_mask, hidden_dim=hidden_size,U=U_p,W=W_p,b=b_p,Ub=U_p_b,Wb=W_p_b,bb=b_p_b) # paragraph_reps_tensor3=paragraph_model.output_tensor_conc #(batch, 2*hidden, para_len) fwd_LSTM_q_dict=create_LSTM_para(rng, emb_size, hidden_size) bwd_LSTM_q_dict=create_LSTM_para(rng, emb_size, hidden_size) question_para=fwd_LSTM_q_dict.values()+ bwd_LSTM_q_dict.values()# .values returns a list of parameters questions_model=Bd_LSTM_Batch_Tensor_Input_with_Mask_Concate(q_input, q_mask, hidden_size, fwd_LSTM_q_dict, bwd_LSTM_q_dict) q_reps=questions_model.output_sent_rep_maxpooling #(batch, 
2*hidden) # q_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=q_input, Mask=q_mask, hidden_dim=hidden_size,U=U_q,W=W_q,b=b_q,Ub=U_q_b,Wb=W_q_b,bb=b_q_b) # q_reps=q_model.output_sent_rep_conc #(batch, 2*hidden) #interaction batch_ids=T.arange(true_batch_size) c_heads_reps=paragraph_reps_tensor3[batch_ids,:,c_heads] #(batch, 2*hidden) c_tails_reps=paragraph_reps_tensor3[batch_ids,:,c_tails] #(batch, 2*hidden) candididates_reps=T.concatenate([c_heads_reps, c_tails_reps], axis=1) #(batch, 4*hidden) l_heads_reps=paragraph_reps_tensor3[batch_ids,:,l_heads] #(batch, 2*hidden) l_tails_reps=paragraph_reps_tensor3[batch_ids,:,l_tails] #(batch, 2*hidden) longs_reps=T.concatenate([l_heads_reps, l_tails_reps], axis=1) #(batch, 4*hidden) e_heads_reps=paragraph_reps_tensor3[batch_ids,:,e_heads] #(batch, 2*hidden) e_tails_reps=paragraph_reps_tensor3[batch_ids,:,e_tails] #(batch, 2*hidden) extensions_reps=T.concatenate([e_heads_reps, e_tails_reps], axis=1) #(batch, 4*hidden) #glove level average c_input = embeddings[c_ids.flatten()].reshape((true_batch_size, c_len, emb_size)).transpose((0, 2,1)) #(batch, emb_size, c_len) c_sum = T.sum(c_input*c_mask.dimshuffle(0,'x',1), axis=2) #(batch, emb_size) average_C_batch = c_sum/T.sqrt(T.sum(c_sum**2, axis=1)+1e-20).dimshuffle(0,'x') e_input = embeddings[e_ids.flatten()].reshape((true_batch_size, e_len, emb_size)).transpose((0, 2,1)) #(batch, emb_size, c_len) e_sum = T.sum(e_input*e_mask.dimshuffle(0,'x',1), axis=2) #(batch, emb_size) average_E_batch = e_sum/T.sqrt(T.sum(e_sum**2, axis=1)+1e-20).dimshuffle(0,'x') # e_input = embeddings[e_ids.flatten()].reshape((true_batch_size, e_len, emb_size)).transpose((0, 2,1)) #(batch, emb_size, c_len) q_sum = T.sum(q_input*q_mask.dimshuffle(0,'x',1), axis=2) #(batch, emb_size) average_Q_batch = q_sum/T.sqrt(T.sum(q_sum**2, axis=1)+1e-20).dimshuffle(0,'x') # def submatrix_average(matrix, head, tail): # return T.mean(matrix[:, head:tail+1], axis=1) #emb_size # def submatrix_average_q(matrix, head): # return T.mean(matrix[:, head:], axis=1) #emb_size # # average_E_batch, _ = theano.scan(fn=submatrix_average, # sequences=[paragraph_input,e_heads, e_tails]) #(batch, emb_size) # average_C_batch, _ = theano.scan(fn=submatrix_average, # sequences=[paragraph_input,c_heads, c_tails]) #(batch, emb_size) # # Q_valid_len=T.cast(T.sum(q_mask, axis=1), 'int32') # # average_Q_batch, _ = theano.scan(fn=submatrix_average_q, # sequences=[q_input,-Q_valid_len]) #(batch, emb_size) #classify HL_layer_subtask_input=T.concatenate([q_reps, extensions_reps, average_E_batch, average_Q_batch], axis=1) #(batch, 6*hidden+2*emb) HL_layer_subtask_size= 6*hidden_size+2*emb_size#HL_layer_1_input_size+2*HL_hidden_size HL_layer_subtask_1=HiddenLayer(rng, input=HL_layer_subtask_input, n_in=HL_layer_subtask_size, n_out=HL_hidden_size, activation=T.tanh) HL_layer_subtask_2=HiddenLayer(rng, input=HL_layer_subtask_1.output, n_in=HL_hidden_size, n_out=HL_hidden_size, activation=T.tanh) U_subtask_a = create_ensemble_para(rng, 2, HL_hidden_size) # the weight matrix hidden_size*2 norm_U_subtask_a=normalize_matrix(U_subtask_a) LR_subtask_b = theano.shared(value=np.zeros((2,),dtype=theano.config.floatX),name='LR_b', borrow=True) #bias for each target class LR_subtask_para=[U_subtask_a, LR_subtask_b] layer_LR_subtask=LogisticRegression(rng, input=HL_layer_subtask_2.output, n_in=HL_hidden_size, n_out=2, W=norm_U_subtask_a, b=LR_subtask_b) #basically it is a multiplication between weight matrix and input feature vector HL_layer_1_input_size=14*hidden_size+3*emb_size+1 #, 
average_E_batch, average_C_batch, average_Q_batch HL_layer_1_input = T.concatenate([q_reps, longs_reps, extensions_reps, candididates_reps, average_E_batch, average_C_batch, average_Q_batch, layer_LR_subtask.prop_for_posi.reshape((true_batch_size,1))], axis=1) #(batch, 14*hidden_size+3*emb_size+1) HL_layer_1=HiddenLayer(rng, input=HL_layer_1_input, n_in=HL_layer_1_input_size, n_out=HL_hidden_size, activation=T.tanh) HL_layer_2=HiddenLayer(rng, input=HL_layer_1.output, n_in=HL_hidden_size, n_out=HL_hidden_size, activation=T.tanh) LR_input=HL_layer_2.output #T.concatenate([HL_layer_1_input, HL_layer_1.output, HL_layer_2.output], axis=1) #(batch, 10*hidden) LR_input_size= HL_hidden_size#HL_layer_1_input_size+2*HL_hidden_size U_a = create_ensemble_para(rng, 2, LR_input_size) # the weight matrix hidden_size*2 norm_U_a=normalize_matrix(U_a) LR_b = theano.shared(value=np.zeros((2,),dtype=theano.config.floatX),name='LR_b', borrow=True) #bias for each target class LR_para=[U_a, LR_b] layer_LR=LogisticRegression(rng, input=LR_input, n_in=LR_input_size, n_out=2, W=norm_U_a, b=LR_b) #basically it is a multiplication between weight matrix and input feature vector loss=layer_LR.negative_log_likelihood(labels)+layer_LR_subtask.negative_log_likelihood(labels) #for classification task, we usually used negative log likelihood as loss, the lower the better. params = LR_para+[embeddings]+paragraph_para+question_para+HL_layer_1.params+HL_layer_2.params+LR_subtask_para+HL_layer_subtask_1.params+HL_layer_subtask_2.params # L2_reg =L2norm_paraList([embeddings,U1, W1, U1_b, W1_b,UQ, WQ , UQ_b, WQ_b, W_a1, W_a2, U_a]) #L2_reg = L2norm_paraList(params) cost=loss#+0.0005*T.mean(U_a**2) accumulator=[] for para_i in params: eps_p=np.zeros_like(para_i.get_value(borrow=True),dtype=theano.config.floatX) accumulator.append(theano.shared(eps_p, borrow=True)) # create a list of gradients for all model parameters grads = T.grad(cost, params) updates = [] for param_i, grad_i, acc_i in zip(params, grads, accumulator): # print grad_i.type acc = acc_i + T.sqr(grad_i) updates.append((param_i, param_i - learning_rate * grad_i / (T.sqrt(acc)+1e-20))) #AdaGrad updates.append((acc_i, acc)) train_model = theano.function([para, para_mask,c_ids,c_mask,e_ids,e_mask, c_heads, c_tails, l_heads, l_tails, e_heads, e_tails, q, q_mask,labels], cost, updates=updates,on_unused_input='ignore') train_model_pred = theano.function([para, para_mask, c_ids,c_mask,e_ids,e_mask, c_heads, c_tails, l_heads, l_tails, e_heads, e_tails, q, q_mask,labels], layer_LR.y_pred, on_unused_input='ignore') test_model = theano.function([para, para_mask, c_ids,c_mask,e_ids,e_mask, c_heads, c_tails, l_heads, l_tails, e_heads, e_tails, q, q_mask,labels], [layer_LR.errors(labels),layer_LR.y_pred], on_unused_input='ignore') ############### # TRAIN MODEL # ############### print '... training' # early-stopping parameters patience = 500000000000000 # look as this many examples regardless best_params = None best_validation_loss = np.inf best_iter = 0 test_score = 0. 
start_time = time.time() mid_time = start_time past_time= mid_time epoch = 0 done_looping = False #para_list, Q_list, label_list, mask, vocab_size=load_train() n_train_batches=train_size/batch_size #batch_size means how many pairs train_batch_start=list(np.arange(n_train_batches)*batch_size)+[train_size-batch_size] n_train_batches_pred=train_size/batch_size_pred #batch_size means how many pairs train_batch_start_pred=list(np.arange(n_train_batches_pred)*batch_size_pred)+[train_size-batch_size_pred] n_test_batches=test_size/test_batch_size #batch_size means how many pairs test_batch_start=list(np.arange(n_test_batches)*test_batch_size)+[test_size-test_batch_size] max_acc=0.0 cost_i=0.0 train_ids = range(train_size) train_ids_pred = range(train_size) best_test_statistic=defaultdict(int) # best_train_statistic=defaultdict(int) while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 random.shuffle(train_ids) # print train_ids[:100] iter_accu=0 for para_id in train_batch_start: # iter means how many batches have been runed, taking into loop iter = (epoch - 1) * n_train_batches + iter_accu +1 iter_accu+=1 train_id_list = train_ids[para_id:para_id+batch_size] # print 'train_labels[train_id_list]:', train_labels[train_id_list] cost_i+= train_model( train_paras[train_id_list], train_paras_mask[train_id_list], train_c_ids[train_id_list], train_c_masks[train_id_list], train_e_ids[train_id_list], train_e_masks[train_id_list], train_c_heads[train_id_list], train_c_tails[train_id_list], train_l_heads[train_id_list], train_l_tails[train_id_list], train_e_heads[train_id_list], train_e_tails[train_id_list], train_questions[train_id_list], train_questions_mask[train_id_list], train_labels[train_id_list]) #print iter if iter%10==0: #iter>=200 and print 'Epoch ', epoch, 'iter '+str(iter)+'/'+str(len(train_batch_start))+' average cost: '+str(cost_i/iter), 'uses ', (time.time()-past_time)/60.0, 'min' past_time = time.time() # print 'Training Pred...' # train_statistic=defaultdict(int) # for para_id in train_batch_start_pred: # train_id_list = train_ids_pred[para_id:para_id+batch_size_pred] # gold_train_labels_list = train_labels_3c[train_id_list] # # print 'train_id_list:', train_id_list # # print 'train_c_heads[train_id_list]:', train_c_heads[train_id_list] # train_preds_i= train_model_pred( # train_paras[train_id_list], # train_paras_mask[train_id_list], # train_c_ids[train_id_list], # train_c_masks[train_id_list], # train_e_ids[train_id_list], # train_e_masks[train_id_list], # train_c_heads[train_id_list], # train_c_tails[train_id_list], # train_l_heads[train_id_list], # train_l_tails[train_id_list], # train_e_heads[train_id_list], # train_e_tails[train_id_list], # train_questions[train_id_list], # train_questions_mask[train_id_list], # train_labels[train_id_list]) # # for ind, gold_label in enumerate(gold_train_labels_list): # train_statistic[(gold_label, train_preds_i[ind])]+=1 # train_acc= (train_statistic.get((1,1),0)+train_statistic.get((0,0),0))*1.0/(train_statistic.get((1,1),0)+train_statistic.get((0,0),0)+train_statistic.get((1,0),0)+train_statistic.get((0,1),0)) # # print '\t\tcurrnt train acc:', train_acc, ' train_statistic:', train_statistic print 'Testing...' 
                error=0
                test_statistic=defaultdict(int)
                for test_para_id in test_batch_start:
                    test_id_list = range(test_para_id, test_para_id+test_batch_size)
                    # print 'test_id_list:',test_id_list
                    # print 'test_c_heads[test_id_list]', test_c_heads[test_id_list]
                    gold_labels_list = test_labels_3c[test_para_id:test_para_id+test_batch_size]
                    error_i, preds_i= test_model(
                        test_paras[test_id_list],
                        test_paras_mask[test_id_list],
                        test_c_ids[test_id_list],
                        test_c_masks[test_id_list],
                        test_e_ids[test_id_list],
                        test_e_masks[test_id_list],
                        test_c_heads[test_id_list],
                        test_c_tails[test_id_list],
                        test_l_heads[test_id_list],
                        test_l_tails[test_id_list],
                        test_e_heads[test_id_list],
                        test_e_tails[test_id_list],
                        test_questions[test_id_list],
                        test_questions_mask[test_id_list],
                        test_labels[test_id_list])
                    error+=error_i
                    for ind, gold_label in enumerate(gold_labels_list):
                        test_statistic[(gold_label, preds_i[ind])]+=1
                # acc=1.0-error*1.0/len(test_batch_start)
                acc= (test_statistic.get((1,1),0)+test_statistic.get((0,0),0))*1.0/(
                    test_statistic.get((1,1),0)+test_statistic.get((0,0),0)+
                    test_statistic.get((1,0),0)+test_statistic.get((0,1),0))

                if acc> max_acc:
                    max_acc=acc
                    best_test_statistic=test_statistic
                    store_model_to_file(storePath+'Best_Paras_HS_v2_000_subtask_'+str(max_acc), params)
                    print 'Finished storing best params at:', max_acc

                print 'current average acc:', acc, '\t\tmax acc:', max_acc, '\ttest_statistic:', test_statistic
                print '\t\t\t\tbest statistic:', best_test_statistic

            if patience <= iter:
                done_looping = True
                break

        print 'Epoch ', epoch, 'uses ', (time.time()-mid_time)/60.0, 'min'
        mid_time = time.time()
        # print 'Batch_size: ', update_freq

    end_time = time.time()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i,'\
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' + os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
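# The "glove level average" above sums word embeddings under a 0/1 mask and then
# rescales the sum to unit L2 norm (c_sum / sqrt(sum(c_sum**2) + 1e-20)).
# Below is a small NumPy sketch of that operation, assuming an embedded batch of
# shape (batch, emb_size, seq_len) and a mask of shape (batch, seq_len); the
# function and variable names are illustrative only.
import numpy as np

def masked_unit_sum(emb_batch, mask, eps=1e-20):
    """Sum token vectors where mask==1, then L2-normalize per example,
    mirroring average_C_batch / average_E_batch / average_Q_batch above."""
    summed = np.sum(emb_batch * mask[:, None, :], axis=2)      # (batch, emb_size)
    norms = np.sqrt(np.sum(summed ** 2, axis=1) + eps)         # (batch,)
    return summed / norms[:, None]

# toy usage: batch of 2 sequences, emb_size 3, length 4
emb = np.random.randn(2, 3, 4)
mask = np.array([[1., 1., 0., 0.], [1., 1., 1., 0.]])
rep = masked_unit_sum(emb, mask)                               # shape (2, 3), unit-norm rows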
def evaluate_lenet5(learning_rate=0.1, n_epochs=2000, batch_size=500, test_batch_size=500, emb_size=300, hidden_size=300, L2_weight=0.0001, margin=0.5, train_size=4000000, test_size=1000, max_context_len=25, max_span_len=7, max_q_len=40, max_EM=0.052): model_options = locals().copy() print "model options", model_options rootPath='/mounts/data/proj/wenpeng/Dataset/SQuAD/'; rng = np.random.RandomState(23455) word2id,train_questions,train_questions_mask,train_lefts,train_lefts_mask,train_spans,train_spans_mask,train_rights,train_rights_mask=load_SQUAD_hinrich(train_size, max_context_len, max_span_len, max_q_len) test_ground_truth,test_candidates,test_questions,test_questions_mask,test_lefts,test_lefts_mask,test_spans,test_spans_mask,test_rights,test_rights_mask=load_dev_hinrich(word2id, test_size, max_context_len, max_span_len, max_q_len) overall_vocab_size=len(word2id) print 'vocab size:', overall_vocab_size rand_values=random_value_normal((overall_vocab_size+1, emb_size), theano.config.floatX, np.random.RandomState(1234)) # rand_values[0]=np.array(np.zeros(emb_size),dtype=theano.config.floatX) id2word = {y:x for x,y in word2id.iteritems()} word2vec=load_word2vec() rand_values=load_word2vec_to_init(rand_values, id2word, word2vec) embeddings=theano.shared(value=rand_values, borrow=True) # allocate symbolic variables for the data # index = T.lscalar() left=T.imatrix() #(2*batch, len) left_mask=T.fmatrix() #(2*batch, len) span=T.imatrix() #(2*batch, span_len) span_mask=T.fmatrix() #(2*batch, span_len) right=T.imatrix() #(2*batch, len) right_mask=T.fmatrix() #(2*batch, len) q=T.imatrix() #(2*batch, len_q) q_mask=T.fmatrix() #(2*batch, len_q) ###################### # BUILD ACTUAL MODEL # ###################### print '... building the model' U1, W1, b1=create_GRU_para(rng, emb_size, hidden_size) U1_b, W1_b, b1_b=create_GRU_para(rng, emb_size, hidden_size) GRU1_para=[U1, W1, b1, U1_b, W1_b, b1_b] U2, W2, b2=create_GRU_para(rng, hidden_size, hidden_size) U2_b, W2_b, b2_b=create_GRU_para(rng, hidden_size, hidden_size) GRU2_para=[U2, W2, b2, U2_b, W2_b, b2_b] W_a1 = create_ensemble_para(rng, hidden_size, hidden_size)# init_weights((2*hidden_size, hidden_size)) W_a2 = create_ensemble_para(rng, hidden_size, hidden_size) attend_para=[W_a1, W_a2] params = [embeddings]+GRU1_para+attend_para+GRU2_para # load_model_from_file(rootPath+'Best_Para_dim'+str(emb_size), params) left_input = embeddings[left.flatten()].reshape((left.shape[0], left.shape[1], emb_size)).transpose((0, 2,1)) # (2*batch_size, emb_size, len_context) span_input = embeddings[span.flatten()].reshape((span.shape[0], span.shape[1], emb_size)).transpose((0, 2,1)) # (2*batch_size, emb_size, len_span) right_input = embeddings[right.flatten()].reshape((right.shape[0], right.shape[1], emb_size)).transpose((0, 2,1)) # (2*batch_size, emb_size, len_context) q_input = embeddings[q.flatten()].reshape((q.shape[0], q.shape[1], emb_size)).transpose((0, 2,1)) # (2*batch_size, emb_size, len_q) left_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=left_input, Mask=left_mask, hidden_dim=hidden_size,U=U1,W=W1,b=b1,Ub=U1_b,Wb=W1_b,bb=b1_b) left_reps=left_model.output_tensor #(batch, emb, para_len) span_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=span_input, Mask=span_mask, hidden_dim=hidden_size,U=U1,W=W1,b=b1,Ub=U1_b,Wb=W1_b,bb=b1_b) span_reps=span_model.output_tensor #(batch, emb, para_len) right_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=right_input, Mask=right_mask, hidden_dim=hidden_size,U=U1,W=W1,b=b1,Ub=U1_b,Wb=W1_b,bb=b1_b) 
right_reps=right_model.output_tensor #(batch, emb, para_len) q_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=q_input, Mask=q_mask, hidden_dim=hidden_size,U=U1,W=W1,b=b1,Ub=U1_b,Wb=W1_b,bb=b1_b) q_reps=q_model.output_tensor #(batch, emb, para_len) #interaction left_reps_via_q_reps, q_reps_via_left_reps=attention_dot_prod_between_2tensors(left_reps, q_reps) span_reps_via_q_reps, q_reps_via_span_reps=attention_dot_prod_between_2tensors(span_reps, q_reps) right_reps_via_q_reps, q_reps_via_right_reps=attention_dot_prod_between_2tensors(right_reps, q_reps) # q_reps_via_left_reps=attention_dot_prod_between_2tensors(q_reps, left_reps) # q_reps_via_span_reps=attention_dot_prod_between_2tensors(q_reps, span_reps) # q_reps_via_right_reps=attention_dot_prod_between_2tensors(q_reps, right_reps) #combine origin_W=normalize_matrix(W_a1) attend_W=normalize_matrix(W_a2) left_origin_reps=T.dot(left_reps.dimshuffle(0, 2,1), origin_W) span_origin_reps=T.dot(span_reps.dimshuffle(0, 2,1), origin_W) right_origin_reps=T.dot(right_reps.dimshuffle(0, 2,1), origin_W) q_origin_reps=T.dot(q_reps.dimshuffle(0, 2,1), origin_W) left_attend_q_reps=T.dot(q_reps_via_left_reps.dimshuffle(0, 2,1), attend_W) span_attend_q_reps=T.dot(q_reps_via_span_reps.dimshuffle(0, 2,1), attend_W) right_attend_q_reps=T.dot(q_reps_via_right_reps.dimshuffle(0, 2,1), attend_W) q_attend_left_reps=T.dot(left_reps_via_q_reps.dimshuffle(0, 2,1), attend_W) q_attend_span_reps=T.dot(span_reps_via_q_reps.dimshuffle(0, 2,1), attend_W) q_attend_right_reps=T.dot(right_reps_via_q_reps.dimshuffle(0, 2,1), attend_W) add_left=left_origin_reps+q_attend_left_reps #(2*batch, len ,hidden) add_span=span_origin_reps+q_attend_span_reps add_right=right_origin_reps+q_attend_right_reps add_q_by_left=q_origin_reps+left_attend_q_reps add_q_by_span=q_origin_reps+span_attend_q_reps add_q_by_right=q_origin_reps+right_attend_q_reps #second GRU add_left_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_left.dimshuffle(0,2,1), Mask=left_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_left_reps=add_left_model.output_sent_rep_maxpooling #(batch, hidden_dim) add_span_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_span.dimshuffle(0,2,1), Mask=span_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_span_reps=add_span_model.output_sent_rep_maxpooling #(batch, hidden_dim) add_right_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_right.dimshuffle(0,2,1), Mask=right_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_right_reps=add_right_model.output_sent_rep_maxpooling #(batch, hidden_dim) add_q_by_left_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_q_by_left.dimshuffle(0,2,1), Mask=q_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_q_by_left_reps=add_q_by_left_model.output_sent_rep_maxpooling #(batch, hidden_dim) add_q_by_span_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_q_by_span.dimshuffle(0,2,1), Mask=q_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_q_by_span_reps=add_q_by_span_model.output_sent_rep_maxpooling #(batch, hidden_dim) add_q_by_right_model=Bd_GRU_Batch_Tensor_Input_with_Mask(X=add_q_by_right.dimshuffle(0,2,1), Mask=q_mask, hidden_dim=hidden_size,U=U2,W=W2,b=b2,Ub=U2_b,Wb=W2_b,bb=b2_b) add_q_by_right_reps=add_q_by_right_model.output_sent_rep_maxpooling #(batch, hidden_dim) paragraph_concat=T.concatenate([add_left_reps, add_span_reps, add_right_reps], axis=1) #(batch, 3*hidden) question_concat=T.concatenate([add_q_by_left_reps, add_q_by_span_reps, 
add_q_by_right_reps], axis=1) #(batch, 3*hidden) simi_list=cosine_row_wise_twoMatrix(paragraph_concat, question_concat) #(2*batch) pos_simi_vec=simi_list[::2] neg_simi_vec=simi_list[1::2] raw_loss=T.maximum(0.0, margin+neg_simi_vec-pos_simi_vec) #params = layer3.params + layer2.params + layer1.params+ [conv_W, conv_b] # L2_reg =L2norm_paraList([embeddings,U1, W1, U1_b, W1_b,UQ, WQ , UQ_b, WQ_b, W_a1, W_a2, U_a]) #L2_reg = L2norm_paraList(params) cost=T.sum(raw_loss)#+ConvGRU_1.error# accumulator=[] for para_i in params: eps_p=np.zeros_like(para_i.get_value(borrow=True),dtype=theano.config.floatX) accumulator.append(theano.shared(eps_p, borrow=True)) # create a list of gradients for all model parameters grads = T.grad(cost, params) updates = [] for param_i, grad_i, acc_i in zip(params, grads, accumulator): # print grad_i.type acc = acc_i + T.sqr(grad_i) updates.append((param_i, param_i - learning_rate * grad_i / (T.sqrt(acc)+1e-8))) #AdaGrad updates.append((acc_i, acc)) train_model = theano.function([left, left_mask, span, span_mask, right, right_mask, q, q_mask], cost, updates=updates,on_unused_input='ignore') test_model = theano.function([left, left_mask, span, span_mask, right, right_mask, q, q_mask], simi_list, on_unused_input='ignore') ############### # TRAIN MODEL # ############### print '... training' # early-stopping parameters patience = 500000000000000 # look as this many examples regardless best_params = None best_validation_loss = np.inf best_iter = 0 test_score = 0. start_time = time.time() mid_time = start_time past_time= mid_time epoch = 0 done_looping = False #para_list, Q_list, label_list, mask, vocab_size=load_train() n_train_batches=train_size/batch_size #batch_size means how many pairs remain_train=train_size%batch_size # train_batch_start=list(np.arange(n_train_batches)*batch_size*2)+[train_size*2-batch_size*2] # always ou shu if remain_train>0: train_batch_start=list(np.arange(n_train_batches)*batch_size)+[train_size-batch_size] else: train_batch_start=list(np.arange(n_train_batches)*batch_size) max_F1_acc=0.0 max_exact_acc=0.0 cost_i=0.0 train_odd_ids = list(np.arange(train_size)*2) while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 random.shuffle(train_odd_ids) iter_accu=0 for para_id in train_batch_start: # iter means how many batches have been runed, taking into loop iter = (epoch - 1) * n_train_batches + iter_accu +1 iter_accu+=1 train_id_list=[[train_odd_id, train_odd_id+1] for train_odd_id in train_odd_ids[para_id:para_id+batch_size]] train_id_list=sum(train_id_list,[]) # print train_id_list cost_i+= train_model( np.asarray([train_lefts[id] for id in train_id_list], dtype='int32'), np.asarray([train_lefts_mask[id] for id in train_id_list], dtype=theano.config.floatX), np.asarray([train_spans[id] for id in train_id_list], dtype='int32'), np.asarray([train_spans_mask[id] for id in train_id_list], dtype=theano.config.floatX), np.asarray([train_rights[id] for id in train_id_list], dtype='int32'), np.asarray([train_rights_mask[id] for id in train_id_list], dtype=theano.config.floatX), np.asarray([train_questions[id] for id in train_id_list], dtype='int32'), np.asarray([train_questions_mask[id] for id in train_id_list], dtype=theano.config.floatX)) #print iter if iter%100==0: print 'Epoch ', epoch, 'iter '+str(iter)+' average cost: '+str(cost_i/iter), 'uses ', (time.time()-past_time)/60.0, 'min' print 'Testing...' 
past_time = time.time() exact_match=0.0 F1_match=0.0 for test_pair_id in range(test_size): test_example_lefts=test_lefts[test_pair_id] test_example_lefts_mask=test_lefts_mask[test_pair_id] test_example_spans=test_spans[test_pair_id] test_example_spans_mask=test_spans_mask[test_pair_id] test_example_rights=test_rights[test_pair_id] test_example_rights_mask=test_rights_mask[test_pair_id] test_example_questions=test_questions[test_pair_id] test_example_questions_mask=test_questions_mask[test_pair_id] test_example_candidates=test_candidates[test_pair_id] test_example_size=len(test_example_lefts) # print 'test_pair_id, test_example_size:', test_pair_id, test_example_size if test_example_size < test_batch_size: #pad pad_size=test_batch_size-test_example_size test_example_lefts+=test_example_lefts[-1:]*pad_size test_example_lefts_mask+=test_example_lefts_mask[-1:]*pad_size test_example_spans+=test_example_spans[-1:]*pad_size test_example_spans_mask+=test_example_spans_mask[-1:]*pad_size test_example_rights+=test_example_rights[-1:]*pad_size test_example_rights_mask+=test_example_rights_mask[-1:]*pad_size test_example_questions+=test_example_questions[-1:]*pad_size test_example_questions_mask+=test_example_questions_mask[-1:]*pad_size test_example_candidates+=test_example_candidates[-1:]*pad_size test_example_size=test_batch_size n_test_batches=test_example_size/test_batch_size n_test_remain=test_example_size%test_batch_size if n_test_remain > 0: test_batch_start=list(np.arange(n_test_batches)*test_batch_size)+[test_example_size-test_batch_size] else: test_batch_start=list(np.arange(n_test_batches)*test_batch_size) all_simi_list=[] all_cand_list=[] for test_para_id in test_batch_start: simi_return_vector=test_model( np.asarray(test_example_lefts[test_para_id:test_para_id+test_batch_size], dtype='int32'), np.asarray(test_example_lefts_mask[test_para_id:test_para_id+test_batch_size], dtype=theano.config.floatX), np.asarray(test_example_spans[test_para_id:test_para_id+test_batch_size], dtype='int32'), np.asarray(test_example_spans_mask[test_para_id:test_para_id+test_batch_size], dtype=theano.config.floatX), np.asarray(test_example_rights[test_para_id:test_para_id+test_batch_size], dtype='int32'), np.asarray(test_example_rights_mask[test_para_id:test_para_id+test_batch_size], dtype=theano.config.floatX), np.asarray(test_example_questions[test_para_id:test_para_id+test_batch_size], dtype='int32'), np.asarray(test_example_questions_mask[test_para_id:test_para_id+test_batch_size], dtype=theano.config.floatX)) candidate_list=test_example_candidates[test_para_id:test_para_id+test_batch_size] all_simi_list+=list(simi_return_vector) all_cand_list+=candidate_list top1_cand=all_cand_list[np.argsort(all_simi_list)[-1]] # print top1_cand, test_ground_truth[test_pair_id] if top1_cand == test_ground_truth[test_pair_id]: exact_match+=1 F1=macrof1(top1_cand, test_ground_truth[test_pair_id]) # print '\t\t\t', F1 F1_match+=F1 # match_amount=len(pred_ans_set & q_gold_ans_set) # # print 'q_gold_ans_set:', q_gold_ans_set # # print 'pred_ans_set:', pred_ans_set # if match_amount>0: # exact_match+=match_amount*1.0/len(pred_ans_set) F1_acc=F1_match/test_size exact_acc=exact_match/test_size if F1_acc> max_F1_acc: max_F1_acc=F1_acc # store_model_to_file(params, emb_size) if exact_acc> max_exact_acc: max_exact_acc=exact_acc if max_exact_acc > max_EM: store_model_to_file(rootPath+'Best_Para_'+str(max_EM), params) print 'Finished storing best params at:', max_exact_acc print 'current average F1:', F1_acc, '\t\tmax F1:', 
                print 'current average F1:', F1_acc, '\t\tmax F1:', max_F1_acc, 'current exact:', exact_acc, '\t\tmax exact_acc:', max_exact_acc

            if patience <= iter:
                done_looping = True
                break

        print 'Epoch ', epoch, 'uses ', (time.time()-mid_time)/60.0, 'min'
        mid_time = time.time()

    #print 'Batch_size: ', update_freq
    end_time = time.time()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
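
# --- Illustrative helper sketches (added; not part of the original code) ---
# The ranking loss above uses cosine_row_wise_twoMatrix() and the evaluation loop
# uses macrof1(); both are imported from elsewhere in this project and are not
# defined in this file. The two functions below are minimal sketches of their
# assumed behaviour, given under hypothetical names (with a leading underscore)
# so they do not shadow the real implementations. They rely on the numpy (np)
# and theano.tensor (T) imports already used throughout this file.
from collections import Counter

def _sketch_cosine_row_wise_twoMatrix(m1, m2):
    # Assumed behaviour: row-wise cosine similarity between two symbolic matrices
    # of shape (batch, dim), returning a symbolic vector of shape (batch,).
    dot = T.sum(m1 * m2, axis=1)
    norm1 = T.sqrt(T.sum(m1 ** 2, axis=1) + 1e-20)
    norm2 = T.sqrt(T.sum(m2 ** 2, axis=1) + 1e-20)
    return dot / (norm1 * norm2)

def _sketch_macrof1(pred_answer, gold_answer):
    # Assumed behaviour: token-level F1 between the predicted answer string and
    # the gold answer string, in the spirit of the official SQuAD evaluation.
    pred_tokens = pred_answer.split()
    gold_tokens = gold_answer.split()
    common = Counter(pred_tokens) & Counter(gold_tokens)
    overlap = sum(common.values())
    if overlap == 0:
        return 0.0
    precision = overlap * 1.0 / len(pred_tokens)
    recall = overlap * 1.0 / len(gold_tokens)
    return 2 * precision * recall / (precision + recall)
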
def evaluate_lenet5(learning_rate=0.005, n_epochs=2000, batch_size=300, test_batch_size=400, emb_size=50, hidden_size=300,
                    HL_hidden_size=200, L2_weight=0.0001, train_size=None, test_size=None, batch_size_pred=400,
                    trichar_len=15, char_emb_size=50, para_len=101, question_len=20, c_len=1, model_type='train'):

    model_options = locals().copy()
    print "model options", model_options

    rootPath='/mounts/Users/cisintern/hs/l/workhs/yin/20170320/'
    storePath='/mounts/data/proj/wenpeng/Dataset/SQuAD/'
    rng = np.random.RandomState(23455)

    word2id={}
    trichar2id={}
    word2id['UNK']=0  # use it to pad
    #word2id, trichar2id, questions,questions_mask,paras,paras_mask,labels, isInQ_para, paras_shape, questions_shape, types, types_shape,question_trichar_ids,question_trichar_masks,para_trichar_ids,para_trichar_masks,type_trichar_ids,type_trichar_masks
    word2id, trichar2id,train_questions,train_questions_mask,train_paras,train_paras_mask,train_labels, train_islabels, train_paras_shape, train_questions_shape, train_types, train_types_shape,train_question_trichar_ids,train_question_trichar_masks,train_para_trichar_ids,train_para_trichar_masks,train_type_trichar_ids,train_type_trichar_masks=load_SQUAD_hinrich_v4(train_size, para_len, question_len, trichar_len, word2id,trichar2id, rootPath+'trn20170320.txt')

    # The big dev file appears to be loaded only to extend word2id/trichar2id; the second
    # call below overwrites the test_* variables with the dev set that is actually evaluated.
    word2id, trichar2id,test_questions,test_questions_mask,test_paras,test_paras_mask,test_labels, test_islabels, test_paras_shape, test_questions_shape, test_types, test_types_shape,test_question_trichar_ids,test_question_trichar_masks,test_para_trichar_ids,test_para_trichar_masks,test_type_trichar_ids,test_type_trichar_masks=load_SQUAD_hinrich_v4(test_size, para_len, question_len, trichar_len,word2id, trichar2id, rootPath+'dev.big.20170320.txt')
    word2id, trichar2id,test_questions,test_questions_mask,test_paras,test_paras_mask,test_labels, test_islabels, test_paras_shape, test_questions_shape, test_types, test_types_shape,test_question_trichar_ids,test_question_trichar_masks,test_para_trichar_ids,test_para_trichar_masks,test_type_trichar_ids,test_type_trichar_masks=load_SQUAD_hinrich_v4(test_size, para_len, question_len, trichar_len,word2id, trichar2id, rootPath+'dev20170320.txt')
    print 'word2id size for bigger dataset:', len(word2id), 'trichar size:', len(trichar2id)

    train_size=len(train_questions)
    test_size = len(test_questions)  #50010#

    train_questions = np.asarray(train_questions, dtype='int32')
    train_questions_shape = np.asarray(train_questions_shape, dtype='int32')
    train_questions_mask = np.asarray(train_questions_mask, dtype=theano.config.floatX)
    train_paras = np.asarray(train_paras, dtype='int32')
    train_paras_shape = np.asarray(train_paras_shape, dtype='int32')
    train_paras_mask = np.asarray(train_paras_mask, dtype=theano.config.floatX)
    train_types = np.asarray(train_types, dtype='int32')
    train_types_shape = np.asarray(train_types_shape, dtype='int32')
#     train_c_ids = np.asarray(train_c_ids, dtype='int32')
#     train_c_ids_shape = np.asarray(train_c_ids_shape, dtype='int32')
#     train_c_masks = np.asarray(train_c_masks, dtype=theano.config.floatX)
    train_islabels = np.asarray(train_islabels, dtype=theano.config.floatX)
#     train_c_heads = np.asarray(train_c_heads, dtype='int32')
#     train_c_tails = np.asarray(train_c_tails, dtype='int32')
    train_labels = np.asarray(train_labels, dtype='int32')
    #train_question_trichar_ids,train_question_trichar_masks,train_para_trichar_ids,train_para_trichar_masks,train_type_trichar_ids,train_type_trichar_masks
    train_question_trichar_ids = np.asarray(train_question_trichar_ids, dtype='int32')
    train_question_trichar_masks = np.asarray(train_question_trichar_masks, dtype=theano.config.floatX)
    train_para_trichar_ids = np.asarray(train_para_trichar_ids, dtype='int32')
    train_para_trichar_masks = np.asarray(train_para_trichar_masks, dtype=theano.config.floatX)
    train_type_trichar_ids = np.asarray(train_type_trichar_ids, dtype='int32')
    train_type_trichar_masks = np.asarray(train_type_trichar_masks, dtype=theano.config.floatX)

    test_questions = np.asarray(test_questions, dtype='int32')
    test_questions_shape = np.asarray(test_questions_shape, dtype='int32')
    test_questions_mask = np.asarray(test_questions_mask, dtype=theano.config.floatX)
    test_paras = np.asarray(test_paras, dtype='int32')
    test_paras_shape = np.asarray(test_paras_shape, dtype='int32')
    test_paras_mask = np.asarray(test_paras_mask, dtype=theano.config.floatX)
    test_types = np.asarray(test_types, dtype='int32')
    test_types_shape = np.asarray(test_types_shape, dtype='int32')
#     test_c_ids = np.asarray(test_c_ids, dtype='int32')
#     test_c_ids_shape = np.asarray(test_c_ids_shape, dtype='int32')
#     test_c_masks = np.asarray(test_c_masks, dtype=theano.config.floatX)
    test_islabels = np.asarray(test_islabels, dtype=theano.config.floatX)
#     test_c_heads = np.asarray(test_c_heads, dtype='int32')
#     test_c_tails = np.asarray(test_c_tails, dtype='int32')
    test_labels = np.asarray(test_labels, dtype='int32')
    test_question_trichar_ids = np.asarray(test_question_trichar_ids, dtype='int32')
    test_question_trichar_masks = np.asarray(test_question_trichar_masks, dtype=theano.config.floatX)
    test_para_trichar_ids = np.asarray(test_para_trichar_ids, dtype='int32')
    test_para_trichar_masks = np.asarray(test_para_trichar_masks, dtype=theano.config.floatX)
    test_type_trichar_ids = np.asarray(test_type_trichar_ids, dtype='int32')
    test_type_trichar_masks = np.asarray(test_type_trichar_masks, dtype=theano.config.floatX)

    overall_vocab_size=len(word2id)
    print 'train size:', train_size, 'test size:', test_size, 'vocab size:', overall_vocab_size

    rand_values=random_value_normal((overall_vocab_size, emb_size), theano.config.floatX, rng)
    rand_values[0]=np.array(np.zeros(emb_size),dtype=theano.config.floatX)
    id2word = {y:x for x,y in word2id.iteritems()}
    word2vec=load_word2vec()
    rand_values=load_word2vec_to_init(rand_values, id2word, word2vec)
    embeddings=theano.shared(value=rand_values, borrow=True)

    overall_trichar_size = len(trichar2id)
    char_rand_values=random_value_normal((overall_trichar_size, char_emb_size), theano.config.floatX, rng)
    char_embeddings=theano.shared(value=char_rand_values, borrow=True)

    para=T.imatrix()    #(2*batch, len)
    para_shape = T.imatrix()
    para_mask=T.fmatrix()    #(2*batch, len)
    q=T.imatrix()    #(2*batch, len_q)
    q_shape = T.imatrix()
    q_mask=T.fmatrix()    #(2*batch, len_q)
    islabels = T.fmatrix()
    labels=T.ivector()    #batch
    types=T.imatrix()
    types_shape=T.imatrix()
    q_trichar_ids = T.imatrix()
    q_trichar_masks =T.fmatrix()
    para_trichar_ids = T.imatrix()
    para_trichar_masks =T.fmatrix()
    type_trichar_ids = T.imatrix()
    type_trichar_masks =T.fmatrix()

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print '... building the model'
    true_batch_size = para.shape[0]

    paragraph_input = embeddings[para.flatten()].reshape((true_batch_size, para_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, para_len)
    q_input = embeddings[q.flatten()].reshape((true_batch_size, question_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, question_len)
    q_types = embeddings[types.flatten()].reshape((true_batch_size, 2, emb_size)).transpose((0, 2,1))

    paragraph_input_shape = embeddings[para_shape.flatten()].reshape((true_batch_size, para_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, para_len)
    q_input_shape = embeddings[q_shape.flatten()].reshape((true_batch_size, question_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, question_len)
    q_types_shape = embeddings[types_shape.flatten()].reshape((true_batch_size, 2, emb_size)).transpose((0, 2,1))

    paragraph_input_trichar = char_embeddings[para_trichar_ids.flatten()].reshape((true_batch_size, para_len*trichar_len, char_emb_size))  #(batch, para_len*trichar_len, char_emb_size)
    q_input_trichar = char_embeddings[q_trichar_ids.flatten()].reshape((true_batch_size, question_len*trichar_len, char_emb_size))  #(batch, question_len*trichar_len, char_emb_size)
    q_types_trichar = char_embeddings[type_trichar_ids.flatten()].reshape((true_batch_size, 2*trichar_len, char_emb_size))

    #sum up trichar embeddings into word-level embeddings
    paragraph_input_trichar=T.sum((paragraph_input_trichar*para_trichar_masks.dimshuffle(0,1,'x')).reshape((true_batch_size, para_len, trichar_len,char_emb_size)),axis=2).dimshuffle(0,2,1)  #(true_batch_size, char_emb_size, para_len)
    q_input_trichar=T.sum((q_input_trichar*q_trichar_masks.dimshuffle(0,1,'x')).reshape((true_batch_size, question_len, trichar_len,char_emb_size)),axis=2).dimshuffle(0,2,1)  #(true_batch_size, char_emb_size, q_len)
    q_types_trichar=T.sum((q_types_trichar*type_trichar_masks.dimshuffle(0,1,'x')).reshape((true_batch_size, 2, trichar_len,char_emb_size)),axis=2).dimshuffle(0,2,1)  #(true_batch_size, char_emb_size, 2)

    #concatenate word emb with shape emb and trichar emb
    q_input = T.concatenate([q_input,q_input_shape, q_input_trichar],axis=1)  #(batch, 2*emb_size+char_emb_size, q_len)
    paragraph_input = T.concatenate([paragraph_input,paragraph_input_shape, paragraph_input_trichar,islabels.dimshuffle(0,'x',1)],axis=1)  #(batch, 2*emb_size+char_emb_size+1, para_len)
    q_types_input = T.sum(T.concatenate([q_types,q_types_shape,q_types_trichar],axis=1), axis=2)  #(batch, 2*emb_size+char_emb_size)

    fwd_LSTM_para_dict=create_LSTM_para(rng, 2*emb_size+char_emb_size+1, hidden_size)
    bwd_LSTM_para_dict=create_LSTM_para(rng, 2*emb_size+char_emb_size+1, hidden_size)
    paragraph_para=fwd_LSTM_para_dict.values()+ bwd_LSTM_para_dict.values()  # .values() returns a list of parameters
    paragraph_model=Bd_LSTM_Batch_Tensor_Input_with_Mask_Concate(paragraph_input, para_mask, hidden_size, fwd_LSTM_para_dict, bwd_LSTM_para_dict)
    paragraph_reps_tensor3=paragraph_model.output_tensor  #(batch, 2*hidden, paralen)

    fwd_LSTM_q_dict=create_LSTM_para(rng, 2*emb_size+char_emb_size, hidden_size)
    bwd_LSTM_q_dict=create_LSTM_para(rng, 2*emb_size+char_emb_size, hidden_size)
    question_para=fwd_LSTM_q_dict.values()+ bwd_LSTM_q_dict.values()  # .values() returns a list of parameters
    questions_model=Bd_LSTM_Batch_Tensor_Input_with_Mask_Concate(q_input, q_mask, hidden_size, fwd_LSTM_q_dict, bwd_LSTM_q_dict)
    q_reps=questions_model.output_sent_rep_maxpooling  #(batch, 2*hidden)

    #interaction
    batch_ids=T.arange(true_batch_size)
#     c_heads=theano.shared(value=np.asarray([(para_len-1)/2]*batch_size, dtype='int32'), borrow=True)
    c_heads = T.repeat(theano.shared(value=np.asarray([(para_len-1)/2], dtype='int32'), borrow=True), true_batch_size)
    c_tails=c_heads+1
    c_heads_reps=paragraph_reps_tensor3[batch_ids,:,c_heads]  #(batch, 2*hidden)
    c_tails_reps=paragraph_reps_tensor3[batch_ids,:,c_tails]  #(batch, 2*hidden)
    candidates_reps=T.concatenate([c_heads_reps, c_tails_reps], axis=1)  #(batch, 4*hidden)

    context_l=paragraph_model.forward_output[batch_ids,:,c_heads-1]  #(batch, hidden)
    context_r=paragraph_model.backward_output[batch_ids,:,c_tails+1]  #(batch, hidden)

    #glove level average
#     c_input = embeddings[c_ids.flatten()].reshape((true_batch_size, c_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, c_len)
#     c_input_shape = embeddings[c_ids_shape.flatten()].reshape((true_batch_size, c_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, c_len)
#     c_input = T.concatenate([c_input,c_input_shape],axis=1)
    c_sum = paragraph_input[:,:-1,(para_len-1)/2]  #(batch, 2*emb_size+char_emb_size)
    c_sum_with_isInQLabel = paragraph_input[:,:,(para_len-1)/2]
#     e_input = embeddings[e_ids.flatten()].reshape((true_batch_size, e_len, emb_size)).transpose((0, 2,1))  #(batch, emb_size, c_len)
    q_sum = T.sum(q_input*q_mask.dimshuffle(0,'x',1), axis=2)  #(batch, 2*emb_size+char_emb_size)
#     average_Q_batch = q_sum/T.sqrt(T.sum(q_sum**2, axis=1)+1e-20).dimshuffle(0,'x')

    HL_layer_1_input_size=2*hidden_size+4*hidden_size+(2*emb_size+char_emb_size+1)+(2*emb_size+char_emb_size)+1+hidden_size+hidden_size+(2*emb_size+char_emb_size)+1
    cosine_Qtype_cand = cosine_row_wise_twoMatrix(q_types_input, c_sum).dimshuffle(0,'x')  #(batch, 1)
    #, average_E_batch, average_C_batch, average_Q_batch
    HL_layer_1_input = T.concatenate([q_reps, candidates_reps, c_sum_with_isInQLabel, q_sum, islabels[:,(para_len-1)/2:(para_len-1)/2+1], context_l, context_r, q_types_input, cosine_Qtype_cand], axis=1)

    HL_layer_1=HiddenLayer(rng, input=HL_layer_1_input, n_in=HL_layer_1_input_size, n_out=HL_hidden_size, activation=T.tanh)
    HL_layer_2=HiddenLayer(rng, input=HL_layer_1.output, n_in=HL_hidden_size, n_out=HL_hidden_size, activation=T.tanh)

    LR_input= T.concatenate([HL_layer_1.output, HL_layer_2.output, islabels[:,(para_len-1)/2:(para_len-1)/2+1], cosine_Qtype_cand], axis=1)  #(batch, 2*HL_hidden_size+2)
    LR_input_size= HL_hidden_size+HL_hidden_size+1+1  #HL_layer_1_input_size+2*HL_hidden_size
    U_a = create_ensemble_para(rng, 2, LR_input_size)  # the weight matrix, shape (LR_input_size, 2)
    norm_U_a=normalize_matrix(U_a)
    LR_b = theano.shared(value=np.zeros((2,),dtype=theano.config.floatX),name='char_LR_b', borrow=True)  #bias for each target class
    LR_para=[U_a, LR_b]
    layer_LR=LogisticRegression(rng, input=LR_input, n_in=LR_input_size, n_out=2, W=norm_U_a, b=LR_b)  #basically a multiplication between the weight matrix and the input feature vector
    loss=layer_LR.negative_log_likelihood(labels)  #for classification tasks we usually use negative log likelihood as the loss; the lower the better
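
    # Note (added): HiddenLayer and LogisticRegression are assumed to be the standard
    # Theano-tutorial style classes used throughout this project, i.e. LogisticRegression
    # computes p_y_given_x = softmax(T.dot(input, W) + b) and
    # negative_log_likelihood(labels) = -T.mean(T.log(p_y_given_x)[T.arange(labels.shape[0]), labels]).
    # If the local implementation differs, the wiring above is unchanged; only this description is.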
    params = LR_para+[embeddings,char_embeddings]+paragraph_para+question_para+HL_layer_1.params+HL_layer_2.params
#     load_model_from_file(storePath+'Best_Paras_HS_20170316_0.760357142857', params)

#     L2_reg =L2norm_paraList([embeddings,U1, W1, U1_b, W1_b,UQ, WQ , UQ_b, WQ_b, W_a1, W_a2, U_a])
    #L2_reg = L2norm_paraList(params)
    cost=loss  #+1e-6*L2_reg

    accumulator=[]
    for para_i in params:
        eps_p=np.zeros_like(para_i.get_value(borrow=True),dtype=theano.config.floatX)
        accumulator.append(theano.shared(eps_p, borrow=True))

    # create a list of gradients for all model parameters
    grads = T.grad(cost, params)

    updates = []
    for param_i, grad_i, acc_i in zip(params, grads, accumulator):
#         print grad_i.type
        acc = acc_i + T.sqr(grad_i)
        updates.append((param_i, param_i - learning_rate * grad_i / (T.sqrt(acc)+1e-20)))   #AdaGrad
        updates.append((acc_i, acc))

    train_model = theano.function([para, para_shape, para_mask, q, q_shape, q_mask, islabels, labels, types, types_shape, q_trichar_ids, q_trichar_masks, para_trichar_ids, para_trichar_masks, type_trichar_ids, type_trichar_masks],
                                  cost, updates=updates, on_unused_input='ignore')
#     train_model_pred = theano.function([para, para_mask, c_ids,c_mask,e_ids,e_mask, c_heads, c_tails, l_heads, l_tails, e_heads, e_tails, q, q_mask,labels], layer_LR.y_pred, on_unused_input='ignore')

    test_model = theano.function([para, para_shape, para_mask, q, q_shape, q_mask, islabels, labels, types, types_shape, q_trichar_ids, q_trichar_masks, para_trichar_ids, para_trichar_masks, type_trichar_ids, type_trichar_masks],
                                 [layer_LR.errors(labels), layer_LR.prop_for_posi], on_unused_input='ignore')

    ###############
    # TRAIN MODEL #
    ###############
    print '... training'
    # early-stopping parameters
    patience = 500000000000000  # look at this many examples regardless
    best_params = None
    best_validation_loss = np.inf
    best_iter = 0
    test_score = 0.
    start_time = time.time()
    mid_time = start_time
    past_time= mid_time
    epoch = 0
    done_looping = False

    #para_list, Q_list, label_list, mask, vocab_size=load_train()
    n_train_batches=train_size/batch_size  #batch_size means how many pairs
    train_batch_start=list(np.arange(n_train_batches)*batch_size)+[train_size-batch_size]
#     n_train_batches_pred=train_size/batch_size_pred  #batch_size_pred means how many pairs
#     train_batch_start_pred=list(np.arange(n_train_batches_pred)*batch_size_pred)+[train_size-batch_size_pred]
    n_test_batches=test_size/test_batch_size  #test_batch_size means how many pairs
    n_test_remain=test_size%test_batch_size
    test_batch_start=list(np.arange(n_test_batches)*test_batch_size)+[test_size-test_batch_size]

    max_acc=0.0
    cost_i=0.0
    train_ids = range(train_size)
#     train_ids_pred = range(train_size)
    best_test_statistic=defaultdict(int)
#     best_train_statistic=defaultdict(int)
    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        random.shuffle(train_ids)
#         print train_ids[:100]
        iter_accu=0
        for para_id in train_batch_start:
            # iter counts how many batches have been run so far
            iter = (epoch - 1) * n_train_batches + iter_accu + 1
            iter_accu+=1
            train_id_list = train_ids[para_id:para_id+batch_size]
#             print 'train_labels[train_id_list]:', train_labels[train_id_list]
            if model_type=='train':
                #para, para_shape, para_mask, q, q_shape, q_mask, islabels, labels, types, types_shape, q_trichar_ids, q_trichar_masks, para_trichar_ids, para_trichar_masks, type_trichar_ids, type_trichar_masks
                cost_i+= train_model(
                                     train_paras[train_id_list],
                                     train_paras_shape[train_id_list],
                                     train_paras_mask[train_id_list],
                                     train_questions[train_id_list],
                                     train_questions_shape[train_id_list],
                                     train_questions_mask[train_id_list],
                                     train_islabels[train_id_list],
                                     train_labels[train_id_list],
                                     train_types[train_id_list],
                                     train_types_shape[train_id_list],
                                     train_question_trichar_ids[train_id_list],
                                     train_question_trichar_masks[train_id_list],
                                     train_para_trichar_ids[train_id_list],
                                     train_para_trichar_masks[train_id_list],
                                     train_type_trichar_ids[train_id_list],
                                     train_type_trichar_masks[train_id_list])

            #print iter
            if iter%10 ==0:
                print 'Epoch ', epoch, 'iter '+str(iter)+'/'+str(len(train_batch_start))+' average cost: '+str(cost_i/iter), 'uses ', (time.time()-past_time)/60.0, 'min'
                past_time = time.time()

                print 'Testing...'
                error=0
                test_statistic=defaultdict(int)
                if model_type=='test':
                    writefile=open(storePath+'predictions_20170317.txt', 'w')
                for id, test_para_id in enumerate(test_batch_start):
                    test_id_list = range(test_para_id, test_para_id+test_batch_size)
#                     print 'test_id_list:',test_id_list
#                     print 'test_c_heads[test_id_list]', test_c_heads[test_id_list]
#                     gold_labels_list = test_labels_3c[test_para_id:test_para_id+test_batch_size]
                    error_i, preds_i= test_model(
                                     test_paras[test_id_list],
                                     test_paras_shape[test_id_list],
                                     test_paras_mask[test_id_list],
                                     test_questions[test_id_list],
                                     test_questions_shape[test_id_list],
                                     test_questions_mask[test_id_list],
                                     test_islabels[test_id_list],
                                     test_labels[test_id_list],
                                     test_types[test_id_list],
                                     test_types_shape[test_id_list],
                                     test_question_trichar_ids[test_id_list],
                                     test_question_trichar_masks[test_id_list],
                                     test_para_trichar_ids[test_id_list],
                                     test_para_trichar_masks[test_id_list],
                                     test_type_trichar_ids[test_id_list],
                                     test_type_trichar_masks[test_id_list])
                    if model_type=='test':
                        if id < len(test_batch_start)-1:
                            writefile.write('\n'.join(map(str,list(preds_i)))+'\n')
                        else:
                            writefile.write('\n'.join(map(str,list(preds_i)[-n_test_remain:]))+'\n')
                    error+=error_i
#                     for ind, gold_label in enumerate(gold_labels_list):
#                         test_statistic[(gold_label, preds_i[ind])]+=1
                if model_type=='test':
                    writefile.close()
                acc=1.0-error*1.0/len(test_batch_start)
#                 acc= (test_statistic.get((1,1),0)+test_statistic.get((0,0),0))*1.0/(test_statistic.get((1,1),0)+test_statistic.get((0,0),0)+test_statistic.get((1,0),0)+test_statistic.get((0,1),0))

                if acc> max_acc:
                    max_acc=acc
#                     best_test_statistic=test_statistic
                    if model_type=='train':
                        store_model_to_file(storePath+'Best_Paras_HS_20170324_'+str(max_acc), params)
                        print 'Finished storing best params at:', max_acc
                print 'current average acc:', acc, '\t\tmax acc:', max_acc  #, '\ttest_statistic:', test_statistic
#                 print '\t\t\t\tbest statistic:', best_test_statistic
                if model_type=='test':
                    exit(0)

            if patience <= iter:
                done_looping = True
                break

        print 'Epoch ', epoch, 'uses ', (time.time()-mid_time)/60.0, 'min'
        mid_time = time.time()

    #print 'Batch_size: ', update_freq
    end_time = time.time()
    print('Optimization complete.')
    print('Best validation score of %f %% obtained at iteration %i, '
          'with test performance %f %%' %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
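
# --- Illustrative persistence sketch (added; not part of the original code) ---
# store_model_to_file() / load_model_from_file() are imported from elsewhere in this
# project and are not defined in this file. Below is a minimal sketch of the assumed
# behaviour (dumping and restoring the numpy values of the shared parameters with
# cPickle, Python 2), given under hypothetical underscore names so it does not
# replace the real implementations.
import cPickle

def _sketch_store_model_to_file(file_path, params):
    # Dump the current value of every shared parameter, in the order they appear in params.
    with open(file_path, 'wb') as f:
        for param in params:
            cPickle.dump(param.get_value(borrow=True), f, cPickle.HIGHEST_PROTOCOL)

def _sketch_load_model_from_file(file_path, params):
    # Load values back into the shared parameters, in the same order they were stored.
    with open(file_path, 'rb') as f:
        for param in params:
            param.set_value(cPickle.load(f), borrow=True)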