Code example #1
File: mvrnn_sll.py  Project: SemanticBeeng/MVRNN
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

def evaluate(self, params, rnnDataTest):
     predictLabels = []
     trueLabels = []        
     
     allSNum = rnnDataTest.allSNum
     allSTree = rnnDataTest.allSTree
     allSStr = rnnDataTest.allSStr
     verbIndices = rnnDataTest.verbIndices
     sentenceLabels = rnnDataTest.sentenceLabels
 
     ndoc = rnnDataTest.ndoc()
     print "Total number of trees/sentences to be evaluated: ", ndoc
     for s in range(ndoc):              
         if s % 100 == 0:
             print("Processing sentences", s, '-', s + 100)
         thissentVerbIndices = verbIndices[s]  
         sStr = allSStr[s]; sNum = allSNum[s]; sTree = allSTree[s]
         labels = sentenceLabels[s]
         if (len(sNum) == 1) or (len(thissentVerbIndices) == 0) or (labels.shape[1] != len(sStr)):
             continue  # skip: single-word sentence, no verbs in this sentence, or token/label mismatch
         for nverb, vid in enumerate(thissentVerbIndices):
             scoresMat = np.zeros((len(sStr), self.Wcat.shape[0]))
             for wid in range(len(sStr)):
                 indices = np.array([vid, wid])   
                 setVerbnWordDistanceFeat(self.Wv, sNum, vid, wid, params) 
                 tree = forwardPropTree(self.W, self.WO, self.Wcat, self.Wv, self.Wo, sNum, sTree, sStr, sNN=None, indicies=None,  params=params) 
                 calPredictions(tree, self.Wcat, self.Wv, indices, sStr, params) #updates score, nodepath etc for this verb, word pair
                 scoresMat[wid,:] = tree.score
             pred_answer = viterbi(scoresMat, self.Tran)
             true_answer = labels[nverb,:]
             predictLabels.extend(pred_answer)
             trueLabels.extend(true_answer)
     
     # pos_label=None averages over all labels (older scikit-learn API;
     # newer releases express the same thing via the `average` argument)
     f1 = f1_score(y_true=trueLabels, y_pred=predictLabels, pos_label=None)
     p = precision_score(y_true=trueLabels, y_pred=predictLabels, pos_label=None)
     r = recall_score(y_true=trueLabels, y_pred=predictLabels, pos_label=None)
     print("F1 =", f1)
     print("P  =", p)
     print("R  =", r)
     print()
     return predictLabels
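
Note: evaluate relies on an external viterbi(scoresMat, self.Tran) decoder that is not shown here. The transition-matrix convention can be inferred from cost_oneSent below: Tran[i, j] scores moving from tag i to tag j, and the last row of Tran scores starting in a tag. Under that assumption, a minimal sketch of such a decoder could look like the following; this is an illustration of the technique, not the project's actual implementation:

import numpy as np

def viterbi_sketch(scoresMat, Tran):
    """Max-scoring tag path. scoresMat: (nWords, nTags) per-word tag scores;
    Tran: (nTags+1, nTags) transition scores, last row = start scores."""
    nWords, nTags = scoresMat.shape
    delta = Tran[-1, :] + scoresMat[0, :]  # best score ending in each tag at word 0
    backptr = np.zeros((nWords, nTags), dtype=int)
    for w in range(1, nWords):
        # candidate[i, j]: best path that is in tag i at w-1, then tag j at w
        candidate = delta[:, None] + Tran[:-1, :] + scoresMat[w, :][None, :]
        backptr[w] = np.argmax(candidate, axis=0)
        delta = np.max(candidate, axis=0)
    path = [int(np.argmax(delta))]  # best final tag
    for w in range(nWords - 1, 0, -1):  # follow back-pointers to word 0
        path.append(int(backptr[w, path[-1]]))
    return path[::-1]

The returned list has one tag index per word, which is what the per-element comparison against true_answer above expects.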
Code example #2
File: mvrnn_sll.py  Project: SemanticBeeng/MVRNN
def cost_oneSent(W, WO, Wcat, Wv, Wo, Tran, sNum, sTree, sStr, sNN, labels, verbIndices, params):
    '''Returns the sentence-level log-likelihood cost and gradients for this sentence'''

    cost = 0.0  # cost of one sentence = sum over verbs of the per-word classification cost
    Sdf_s_Wv = None; Sdf_s_Wo = None; Sdf_s_W = None; Sdf_s_WO = None
    Sdf_s_Wcat = None; Sdf_s_Tran = None
    
    #forward propagation for each verb in this sentence
    for nverb, vid in enumerate(verbIndices): 
        scoresMat = np.zeros((len(sStr), Wcat.shape[0]))  
        input_values = np.zeros((len(sStr), Wcat.shape[1]))
        forwardPropTrees = []  
        for wid in range(len(sStr)):
            indices = np.array([vid, wid])   
            setVerbnWordDistanceFeat(Wv, sNum, vid, wid, params) 
            tree = forwardPropTree(W, WO, Wcat, Wv, Wo, sNum, sTree, sStr, sNN,  params=params) #calculate nodevectors and matrices        
            calPredictions(tree, Wcat, Wv, indices, sStr, params) #updates score, nodepath etc for this verb, word pair
            scoresMat[wid,:] = tree.score
            forwardPropTrees.append(tree)
            input_values[wid,:] = np.tanh(np.concatenate((tree.pooledVecPath.flatten(), tree.features))) #this should be same as catInput

        #calculate sentence-level-loglikelihood cost
        #cost = logadd(score for all possible paths) - score(correct path)
        correct_path_score = 0
        last_label = Wcat.shape[0] #last row is the score of starting from a tag
        for i, this_label in enumerate(labels[nverb,:]):
            correct_path_score += scoresMat[i,this_label] + Tran[last_label, this_label]
            last_label = this_label            
        all_scores = calculate_all_scores(scoresMat, Tran)
        error  = np.log(np.sum(np.exp(all_scores[-1]))) - correct_path_score
        cost += error
    
        #calculate derivative of cost function
        grad_Wcat, df_s_Tran = calculate_sll_grad(scoresMat, all_scores, labels[nverb,:], Tran, Wcat)
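        # grad_Wcat presumably holds one row per word: the gradient of the SLL
        # loss w.r.t. that word's per-tag scores; backpropPool below pushes
        # row i down through the i-th forward tree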
        
        if Sdf_s_Tran is None:
            Sdf_s_Tran = np.zeros(df_s_Tran.shape)
        Sdf_s_Tran += df_s_Tran
        
        #calculate hidden layer gradients: backpropagate through each forward tree for this verb
        for i, ftree in enumerate(forwardPropTrees):
            #calculate deltas for the nodes, delta_m and delta_h
            [df_s_Wcat, ftree.nodeVecDeltas, ftree.NN_deltas, paddingDelta] = backpropPool(ftree, grad_Wcat[[i],:], Wcat, params)
    
            deltaDown_vec = np.zeros((params.wordSize,1))
            deltaDown_op = np.zeros((params.wordSize,params.wordSize))
            
            topNode = ftree.getTopNode()
            [df_s_Wv, df_s_Wo, df_s_W, df_s_WO] = backpropAll(ftree, W, WO, Wo, params, deltaDown_vec, deltaDown_op, topNode, Wv.shape[1], None, None)
            
            #Backprop into Padding
            df_s_Wv[:,0] = df_s_Wv[:,0] + paddingDelta.flatten()
            
            if Sdf_s_Wv is None:
                Sdf_s_Wv = np.zeros(df_s_Wv.shape); Sdf_s_Wo = np.zeros(df_s_Wo.shape); Sdf_s_W = np.zeros(df_s_W.shape)
                Sdf_s_WO = np.zeros(df_s_WO.shape); Sdf_s_Wcat = np.zeros(df_s_Wcat.shape)
            
            Sdf_s_Wv = Sdf_s_Wv + df_s_Wv
            Sdf_s_Wo = Sdf_s_Wo + df_s_Wo
            Sdf_s_W = Sdf_s_W + df_s_W
            Sdf_s_WO = Sdf_s_WO + df_s_WO
            Sdf_s_Wcat = Sdf_s_Wcat + df_s_Wcat
        
    
    #scale derivatives by the number of forward trees per verb (one tree per word,
    #so the count is identical for every verb); doing this once, after the verb
    #loop, avoids re-scaling earlier verbs' contributions on each iteration
    numTrees = len(forwardPropTrees)
    Sdf_s_Wcat = (1.0/numTrees) * Sdf_s_Wcat
    Sdf_s_W    = (1.0/numTrees) * Sdf_s_W
    Sdf_s_Wv   = (1.0/numTrees) * Sdf_s_Wv
    Sdf_s_WO   = (1.0/numTrees) * Sdf_s_WO
    Sdf_s_Wo   = (1.0/numTrees) * Sdf_s_Wo

    #scale w.r.t. the number of verbs in this sentence
    numVerbs = verbIndices.shape[0]
    cost = (1.0/numVerbs) * cost
    Sdf_s_Wcat = (1.0/numVerbs) * Sdf_s_Wcat
    Sdf_s_W    = (1.0/numVerbs) * Sdf_s_W
    Sdf_s_Wv   = (1.0/numVerbs) * Sdf_s_Wv
    Sdf_s_WO   = (1.0/numVerbs) * Sdf_s_WO
    Sdf_s_Wo   = (1.0/numVerbs) * Sdf_s_Wo
    Sdf_s_Tran = (1.0/numVerbs) * Sdf_s_Tran
    
    return [Sdf_s_Wv, Sdf_s_Wo, Sdf_s_W, Sdf_s_WO, Sdf_s_Wcat, Sdf_s_Tran, cost]
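
Note: the cost in cost_oneSent is the sentence-level log-likelihood used in Collobert-style sequence taggers: logadd over the scores of every tag path minus the score of the gold path. calculate_all_scores is defined elsewhere in the project; judging from how its last row is consumed (np.log(np.sum(np.exp(all_scores[-1])))), it presumably implements the standard forward recursion in log space. A minimal sketch under that assumption, matching the Tran convention above (last row = start scores):

import numpy as np

def calculate_all_scores_sketch(scoresMat, Tran):
    """Forward recursion in log space: row w holds, for each tag j, the log-sum
    of exp(path score) over all tag paths ending in tag j at word w, so
    log-sum-exp of the last row is the log partition function."""
    nWords, nTags = scoresMat.shape
    all_scores = np.zeros((nWords, nTags))
    all_scores[0] = Tran[-1, :] + scoresMat[0, :]  # start-transition scores
    for w in range(1, nWords):
        # sum out the previous tag i of prev[i] + Tran[i, j], in log space
        prev = all_scores[w - 1][:, None] + Tran[:-1, :]
        all_scores[w] = scoresMat[w] + np.logaddexp.reduce(prev, axis=0)
    return all_scores

With this shape, the error accumulated above is exactly the negative log-probability of the gold tag path under the score model.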