Code example #1
File: test_functions.py Project: 5idaidai/MVRNN
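    # Assumed context (not shown in the snippet): this is a test method in the
    # MVRNN project, with numpy as np, scipy.io as sio, RNNDataCorpus, Params,
    # and getRelevantWords already in scope.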
    def testloadData(self):
        nExamples = 15
        train_data = '/home/bhanu/Downloads/relationClassification/dataCamera/allDataTrain.mat'
        pre_trained_weights = '/home/bhanu/Downloads/relationClassification/dataCamera/pretrainedWeights.mat'
        rnnData = RNNDataCorpus()
        rnnData.load_data(load_file=train_data, nExamples=nExamples)

        params = Params(data=rnnData, wordSize=50, rankWo=3)

        #        n = params.wordSize; fanIn = params.fanIn; nWords = params.nWords; nLabels = params.categories; rank=params.rankWo
        #        theta = np.random.normal(loc=0.0, scale=np.math.pow(10,-5)**2, size = n*(2*n+1) + n*2*n + nLabels*fanIn + n*nWords + (2*n*rank+n)*nWords)
        #
        #        #init respective parameters with prior values
        #        W, WO, Wcat, Wv, Wo =  unroll_theta(theta, params)
        #        Wo[:n,:] = np.ones((n,Wo.shape[1]))  #init word matrices with Identity matrices + epsilon
        #load pre-trained weights here
        mats = sio.loadmat(pre_trained_weights)
        Wv = mats.get('Wv')
        W = mats.get('W')
        WO = mats.get('WO')
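        # note: np.ndarray(shape=...) allocates without initializing, so Wo
        # holds arbitrary memory contents here (kept as in the original)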
        Wo = np.ndarray(
            shape=(2 * params.wordSize * params.rankWo + params.wordSize) *
            params.nWords,
            dtype='float64')
        sentencesIdx = np.arange(nExamples)
        [allSNum_batch, allSNN_batch, Wv_batch, Wo_batch,
         allWordInds] = getRelevantWords(rnnData, sentencesIdx, Wv, Wo, params)

        print
Code example #2
File: trainMVRNN.py Project: SemanticBeeng/MVRNN (identical code also appears in 5idaidai/MVRNN)
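# Assumed imports (not shown in the snippet): numpy as np, scipy.io as sio,
# scipy.optimize.lbfgsb as lbfgsb, plus the project's config, RNNDataCorpus,
# Params, getRelevantWords, unroll_theta, costFn, and evaluate.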
def train():

    np.random.seed(131742)
    #get sentences, trees and labels
    nExamples = -1
    print "loading data.."
    rnnData = RNNDataCorpus()
    rnnData.load_data(load_file=config.train_data, nExamples=nExamples)

    #initialize params
    print "initializing params"
    params = Params(data=rnnData, wordSize=50, rankWo=2)

    #define theta
    #one vector for all the parameters of mvrnn model:  W, Wm, Wlabel, L, Lm
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
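    # Wo packs n + 2*n*rank rows per word: presumably an n-dim diagonal part
    # plus two n x rank low-rank factors of each word's operator matrix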
    Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
    Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
    Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
    #    Wv = 0.01*np.random.randn(n, nWords)
    #    WO = 0.01*np.random.randn(n, 2*n)
    #    W = 0.01*np.random.randn(n, 2*n+1)

    #load pre-trained weights here
    mats = sio.loadmat(config.pre_trained_weights)
    Wv = mats.get('Wv')  #L, as in paper
    W = mats.get('W')  #W, as in paper
    WO = mats.get('WO')  #Wm, as in paper

    sentencesIdx = np.arange(rnnData.ndoc())
    np.random.shuffle(sentencesIdx)
    nTrain = 4 * len(sentencesIdx) / 5
    trainSentIdx = sentencesIdx[0:nTrain]
    testSentIdx = sentencesIdx[nTrain:]
    batchSize = 5
    nBatches = len(trainSentIdx) / batchSize
    evalFreq = 5  #evaluate after every 5 minibatches
    nTestSentEval = 50  #number of test sentences to be evaluated

    rnnData_train = RNNDataCorpus()
    rnnData.copy_into_minibatch(rnnData_train, trainSentIdx)

    rnnData_test = RNNDataCorpus()
    if (len(testSentIdx) > nTestSentEval):
        #        np.random.shuffle(testSentIdx)  #choose random test examples
        thisTestSentIdx = testSentIdx[:nTestSentEval]
    else:
        thisTestSentIdx = testSentIdx
    rnnData.copy_into_minibatch(rnnData_test, thisTestSentIdx)

    #    [Wv_test, Wo_test, _] = getRelevantWords(rnnData_test, Wv,Wo,params)
    [Wv_trainTest, Wo_trainTest, all_train_idx
     ] = getRelevantWords(rnnData, Wv, Wo,
                          params)  #sets nWords_reduced, returns new arrays
    theta = np.concatenate((W.flatten(), WO.flatten(), Wcat.flatten(),
                            Wv_trainTest.flatten(), Wo_trainTest.flatten()))

    #optimize
    print "starting training..."
    nIter = 100
    rnnData_minibatch = RNNDataCorpus()
    for i in range(nIter):
        #train in minibatches
        #        ftrain = np.zeros(nBatches)
        #        for ibatch in range(nBatches):
        #            set_minibatch(rnnData, rnnData_minibatch, ibatch, nBatches, trainSentIdx)

        #            print 'Iteration: ', i, ' minibatch: ', ibatch
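        # a single L-BFGS-B call per outer iteration: maxfun=1 caps it at one
        # cost/gradient evaluation, and tunedTheta is fed back as x0 below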
        tunedTheta, fbatch_train, _ = lbfgsb.fmin_l_bfgs_b(
            func=costFn,
            x0=theta,
            fprime=None,
            args=(rnnData_train, params),
            approx_grad=0,
            bounds=None,
            m=5,
            factr=1e15,
            pgtol=1e-5,
            epsilon=1e-08,
            iprint=3,
            maxfun=1,
            disp=0)

        #map parameters back
        W[:, :], WO[:, :], Wcat[:, :], Wv_trainTest, Wo_trainTest = unroll_theta(
            tunedTheta, params)
        Wv[:, all_train_idx] = Wv_trainTest
        Wo[:, all_train_idx] = Wo_trainTest

        #        ftrain[ibatch] = fbatch_train
        theta = tunedTheta  #for next iteration

        print "========================================"
        print "XXXXXXIteration ", i,
        print "Average cost: ", np.average(fbatch_train)
        evaluate(Wv, Wo, W, WO, Wcat, params, rnnData_test)
        print "========================================"

        #save weights
        save_dict = {'Wv': Wv, 'Wo': Wo, 'Wcat': Wcat, 'W': W, 'WO': WO}
        sio.savemat(config.saved_params_file + '_lbfgs_iter' + str(i),
                    mdict=save_dict)
        print "saved tuned theta. "