Example #1
    def testloadData(self):
        nExamples = 15
        train_data = '/home/bhanu/Downloads/relationClassification/dataCamera/allDataTrain.mat'
        pre_trained_weights = '/home/bhanu/Downloads/relationClassification/dataCamera/pretrainedWeights.mat'
        rnnData = RNNDataCorpus()
        rnnData.load_data(load_file=train_data, nExamples=nExamples)

        params = Params(data=rnnData, wordSize=50, rankWo=3)

        #        n = params.wordSize; fanIn = params.fanIn; nWords = params.nWords; nLabels = params.categories; rank=params.rankWo
        #        theta = np.random.normal(loc=0.0, scale=np.math.pow(10,-5)**2, size = n*(2*n+1) + n*2*n + nLabels*fanIn + n*nWords + (2*n*rank+n)*nWords)
        #
        #        #init respective parameters with prior values
        #        W, WO, Wcat, Wv, Wo =  unroll_theta(theta, params)
        #        Wo[:n,:] = np.ones((n,Wo.shape[1]))  #init word matrices with Identity matrices + epsilon
        #load pre-trained weights here
        mats = sio.loadmat(pre_trained_weights)
        Wv = mats.get('Wv')
        W = mats.get('W')
        WO = mats.get('WO')
        Wo = np.zeros(
            shape=(2 * params.wordSize * params.rankWo + params.wordSize,
                   params.nWords),
            dtype='float64')
        sentencesIdx = np.arange(nExamples)
        [allSNum_batch, allSNN_batch, Wv_batch, Wo_batch,
         allWordInds] = getRelevantWords(rnnData, sentencesIdx, Wv, Wo, params)

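
# A minimal numpy-only sketch of the flat parameter layout suggested by the
# commented-out theta above. The chunk ordering and the value of fanIn are
# assumptions here, not necessarily what the real unroll_theta does.
import numpy as np

n, rank, nLabels, nWords = 50, 3, 19, 200  #illustrative sizes
fanIn = 2 * n

sizes = [n * (2 * n + 1),              #W
         n * 2 * n,                    #WO (Wm in the paper)
         nLabels * fanIn,              #Wcat (Wlabel in the paper)
         n * nWords,                   #Wv (L in the paper)
         (2 * n * rank + n) * nWords]  #Wo (Lm in the paper)
theta = np.random.normal(loc=0.0, scale=(1e-5) ** 2, size=sum(sizes))

chunks = np.split(theta, np.cumsum(sizes)[:-1])
W = chunks[0].reshape(n, 2 * n + 1)
WO = chunks[1].reshape(n, 2 * n)
Wcat = chunks[2].reshape(nLabels, fanIn)
Wv = chunks[3].reshape(n, nWords)
Wo = chunks[4].reshape(2 * n * rank + n, nWords)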
Example #2
    def get_subset(self, rnnData, workerIndex):
        '''rnnData: the complete train or test set from which this worker's subset of docs is selected.'''
        rnnData_mini = RNNDataCorpus()
        dataSize = rnnData.ndoc()
        sizePerNode = dataSize / (self.totalNodes - 1)  #exclude master node
        startPos = (workerIndex - 1) * sizePerNode
        if (workerIndex != self.totalNodes - 1):
            endPos = startPos + sizePerNode
        else:  #last worker gets all the remaining data
            endPos = dataSize
        rnnData.copy_into_minibatch(rnnData_mini, range(startPos, endPos))
        return rnnData_mini
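
# A standalone sketch of the slicing scheme above (Python 2 integer division):
# workers 1..totalNodes-1 get equal contiguous slices of the corpus, and the
# last worker absorbs the remainder. Names here are illustrative.
def worker_slice(dataSize, totalNodes, workerIndex):
    sizePerNode = dataSize / (totalNodes - 1)  #exclude master node (index 0)
    startPos = (workerIndex - 1) * sizePerNode
    if workerIndex != totalNodes - 1:
        endPos = startPos + sizePerNode
    else:
        endPos = dataSize  #last worker takes the remainder
    return startPos, endPos

for w in range(1, 4):
    print worker_slice(10, 4, w)  #prints (0, 3), (3, 6), (6, 10)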
Example #3
    def initMVRNN(self):
        print "Node: ",self.index," Loading training and dev sets.."         
        self.rnnData_train = RNNDataCorpus()
        self.rnnData_train.load_data_srl(config.train_data_srl, nExamples=self.nuse)
        self.rnnData_dev = RNNDataCorpus()
        self.rnnData_dev.load_data_srl(config.dev_data_srl, nExamples=self.nuse)
        modelfilename = config.saved_params_file+'SGD_SRLiter305'
        print "Node: ", self.index," Loading model: ", modelfilename
#        mats =   sio.loadmat(config.saved_params_file+'iter120.mat')
#        Wv = mats.get('Wv')  #L, as in paper
#        W = mats.get('W') #W, as in paper
#        WO = mats.get('WO') #Wm, as in paper
#        Wo = mats.get('Wo')
#        Wcat = mats.get('Wcat')
#        n = Wv.shape[0]
#        r = (Wo.shape[0] - n)/(2*n)
        with open(modelfilename, 'rb') as loadfile:
            self.rnn = cPickle.load(loadfile)  #a pickled MVRNN(W, WO, Wcat, Wv, Wo)
        n = self.rnn.Wv.shape[0]
        r = (self.rnn.Wo.shape[0] - n)/(2*n)
        print "Node: ",self.index, "initializing params.."
        self.params = Params(data=self.rnnData_train, wordSize=n, rankWo=r)
        
#        #to be removed
#        Wcat = 0.005*np.random.randn(self.params.categories, self.params.fanIn)
#        self.rnn.Wcat = Wcat
        
        
        if(self.index == 0):
            print "Master## Total trees in training set: ", self.rnnData_train.ndoc()
            print "Master## nFetch: ", self.nFetch
        
        if(self.index != 0):
            self.rnnData_train = self.get_subset(self.rnnData_train, self.index)
            self.rnnData_dev = None  #workers don't need the dev set, so release it to free memory
            [_, _, self.all_train_idx] = getRelevantWords(self.rnnData_train, self.rnn.Wv, self.rnn.Wo, self.params)  #sets nWords_reduced, returns new arrays
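
# A minimal sketch of the pickle save/load cycle used above; binary mode
# ('wb'/'rb') is the safe choice for pickled numpy arrays. TinyModel is a
# stand-in for MVRNN, for illustration only.
import cPickle
import numpy as np

class TinyModel(object):
    def __init__(self, Wv):
        self.Wv = Wv

model = TinyModel(Wv=0.01 * np.random.randn(50, 100))
with open('/tmp/model.pkl', 'wb') as savefile:
    cPickle.dump(model, savefile, protocol=cPickle.HIGHEST_PROTOCOL)
with open('/tmp/model.pkl', 'rb') as loadfile:
    model2 = cPickle.load(loadfile)
print model2.Wv.shape  #(50, 100)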
Example #4
    def test_write_srl_verbIndices(self):
        infilenames = [
            config.train_data_srl, config.test_data_srl, config.dev_data_srl
        ]
        outfilenames = [
            config.corpus_path + 'srl_vids.train',
            config.corpus_path + 'srl_vids.test',
            config.corpus_path + 'srl_vids.dev'
        ]
        for infilename, outfilename in zip(infilenames, outfilenames):
            rnndata = RNNDataCorpus()
            rnndata.load_data_srl(infilename, nExamples=100)
            allVerbIndices = rnndata.verbIndices
            with open(outfilename, 'w') as wf:
                for verbIndices in allVerbIndices:
                    verbids = verbIndices.flatten()
                    verbidsStr = [str(x) for x in verbids]
                    wf.write("|".join(verbidsStr) + "\n")
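
# Reading back the files written above: one line per sentence, verb indices
# joined by '|', with an empty line meaning no verbs. A sketch of the inverse
# parse (the function name is illustrative).
def read_verb_indices(filename):
    allVerbIndices = []
    with open(filename) as rf:
        for line in rf:
            line = line.strip()
            allVerbIndices.append([int(x) for x in line.split('|')] if line else [])
    return allVerbIndices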
Example #5
def test():

    # load testing data
    print "loading test data.."
    rnnData = RNNDataCorpus()
    rnnData.load_data_srl(load_file=config.test_data_srl, nExamples=10)
    #    rnnData.load_data(load_file=config.test_data, nExamples=-1)

    print 'loading trained model : ', sys.argv[1]
    #    mats = sio.loadmat(config.model_path+str(sys.argv[1]))
    #    Wv = mats.get('Wv')  #L, as in paper
    #    W = mats.get('W') #W, as in paper
    #    WO = mats.get('WO') #Wm, as in paper
    #    Wo = mats.get('Wo')
    #    Wcat = mats.get('Wcat')
    #    n = Wv.shape[0]
    #    r = (Wo.shape[0] - n)/(2*n)
    #    rnn = MVRNN(W, WO, Wcat, Wv, Wo)
    with open(config.model_path + sys.argv[1], 'rb') as loadfile:
        rnn = cPickle.load(loadfile)

    n = rnn.Wv.shape[0]
    r = (rnn.Wo.shape[0] - n) / (2 * n)

    print "initializing params.."
    params = Params(data=rnnData, wordSize=n, rankWo=r)

    print "evaluating.."
    predictLabels = rnn.evaluate(params, rnnData)

    print "creating labels file .."
    create_preds_file(predictLabels,
                      rnnData.categories,
                      rnnData.sentenceLabels,
                      predictions_file=config.results_path + 'preds_srl.txt',
                      testKeys_file='srl_test_keys.txt')
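
# Why r = (rnn.Wo.shape[0] - n) / (2 * n) recovers the rank: Wo stores, per
# word, an n-vector of diagonal terms plus two n-by-r low-rank factors, i.e.
# n + 2*n*r rows (see the Wo initialization in the training examples below).
# A quick numpy check of that arithmetic:
import numpy as np

n, r, nWords = 50, 3, 120
Wo = 0.01 * np.random.randn(n + 2 * n * r, nWords)
assert (Wo.shape[0] - n) / (2 * n) == r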
Example #6
def train():

    np.random.seed(131742)
    #get sentences, trees and labels
    nExamples = -1
    print "loading data.."
    rnnData = RNNDataCorpus()
    rnnData.load_data(load_file=config.train_data, nExamples=nExamples)

    #initialize params
    print "initializing params"
    params = Params(data=rnnData, wordSize=50, rankWo=2)

    #define theta
    #one vector for all the parameters of mvrnn model:  W, Wm, Wlabel, L, Lm
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
    Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
    Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
    #    Wv = 0.01*np.random.randn(n, nWords)
    #    WO = 0.01*np.random.randn(n, 2*n)
    #    W = 0.01*np.random.randn(n, 2*n+1)

    #load pre-trained weights here
    mats = sio.loadmat(config.pre_trained_weights)
    Wv = mats.get('Wv')  #L, as in paper
    W = mats.get('W')  #W, as in paper
    WO = mats.get('WO')  #Wm, as in paper

    sentencesIdx = np.arange(rnnData.ndoc())
    np.random.shuffle(sentencesIdx)
    nTrain = 4 * len(sentencesIdx) / 5
    trainSentIdx = sentencesIdx[0:nTrain]
    testSentIdx = sentencesIdx[nTrain:]
    batchSize = 5
    nBatches = len(trainSentIdx) / batchSize
    evalFreq = 5  #evaluate after every 5 minibatches
    nTestSentEval = 50  #number of test sentences to be evaluated

    rnnData_train = RNNDataCorpus()
    rnnData.copy_into_minibatch(rnnData_train, trainSentIdx)

    rnnData_test = RNNDataCorpus()
    if (len(testSentIdx) > nTestSentEval):
        #        np.random.shuffle(testSentIdx)  #choose random test examples
        thisTestSentIdx = testSentIdx[:nTestSentEval]
    else:
        thisTestSentIdx = testSentIdx
    rnnData.copy_into_minibatch(rnnData_test, thisTestSentIdx)

    #    [Wv_test, Wo_test, _] = getRelevantWords(rnnData_test, Wv,Wo,params)
    [Wv_trainTest, Wo_trainTest, all_train_idx] = getRelevantWords(
        rnnData, Wv, Wo, params)  #sets nWords_reduced, returns new arrays
    theta = np.concatenate((W.flatten(), WO.flatten(), Wcat.flatten(),
                            Wv_trainTest.flatten(), Wo_trainTest.flatten()))

    #optimize
    print "starting training..."
    nIter = 100
    rnnData_minibatch = RNNDataCorpus()
    for i in range(nIter):
        #train in minibatches
        #        ftrain = np.zeros(nBatches)
        #        for ibatch in range(nBatches):
        #            set_minibatch(rnnData, rnnData_minibatch, ibatch, nBatches, trainSentIdx)

        #            print 'Iteration: ', i, ' minibatch: ', ibatch
        tunedTheta, fbatch_train, _ = lbfgsb.fmin_l_bfgs_b(
            func=costFn,
            x0=theta,
            fprime=None,
            args=(rnnData_train, params),
            approx_grad=0,
            bounds=None,
            m=5,
            factr=1e15,
            pgtol=1e-5,
            epsilon=1e-08,
            iprint=3,
            maxfun=1,
            disp=0)

        #map parameters back
        W[:, :], WO[:, :], Wcat[:, :], Wv_trainTest, Wo_trainTest = unroll_theta(
            tunedTheta, params)
        Wv[:, all_train_idx] = Wv_trainTest
        Wo[:, all_train_idx] = Wo_trainTest

        #        ftrain[ibatch] = fbatch_train
        theta = tunedTheta  #for next iteration

        print "========================================"
        print "XXXXXXIteration ", i,
        print "Average cost: ", np.average(fbatch_train)
        evaluate(Wv, Wo, W, WO, Wcat, params, rnnData_test)
        print "========================================"

        #save weights
        save_dict = {'Wv': Wv, 'Wo': Wo, 'Wcat': Wcat, 'W': W, 'WO': WO}
        sio.savemat(config.saved_params_file + '_lbfgs_iter' + str(i),
                    mdict=save_dict)
        print "saved tuned theta. "
Example #7
def writeVectors():

    vecFileName = config.results_path + "vectors.out"
    vecFile = open(vecFileName, 'w')

    mats = sio.loadmat(config.corpus_path + 'vars.normalized.100.mat')
    We_orig = mats.get('We')

    params = sio.loadmat(config.corpus_path + 'params_rae.mat')
    W1 = params.get('W1')
    W2 = params.get('W2')
    b1 = params.get('b1')
    We = params.get('We')
    b = params.get('b')
    W = params.get('W')

    hiddenSize = 100

    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl,
                                nExamples=nExamples)

    print 'writing vectors to: ', vecFileName
    for ii in range(len(rnnData_train.allSNum)):

        sNum = rnnData_train.allSNum[ii]
        sStr = rnnData_train.allSStr[ii]
        sTree = rnnData_train.allSTree[ii]
        sKids = rnnData_train.allSKids[ii]

        words_indexed = np.where(sNum >= 0)[0]
        #L is only the part of the embedding matrix that is relevant for this sentence
        #L is deltaWe
        if We.shape[1] != 0:
            L = We[:, words_indexed]
            words_embedded = We_orig[:, words_indexed] + L
        else:
            words_embedded = We_orig[:, words_indexed]


        # sl = words_embedded.shape[1]

        tree = Tree()
        tree.pp = np.zeros(len(sNum))  #parent pointers; was np.zeros((2*sl-1, 1))
        tree.nodeScores = np.zeros(len(sNum))
        #        tree.nodeNames = np.arange(1,(2*sl-1))
        tree.kids = np.zeros((len(sNum), 2))

        tree.nodeFeatures = np.zeros((hiddenSize, len(sNum)))
        tree.nodeFeatures[:, :len(words_indexed)] = words_embedded

        toMerge = words_indexed.astype('int32')  #working copy of leaf node ids still to be merged
        while len(toMerge) > 1:
            # find unpaired bottom leaf pairs (initially words) that share parent
            i = -1
            foundGoodPair = False
            while (not foundGoodPair):
                i += 1
                if sTree[toMerge[i]] == sTree[toMerge[i + 1]]:
                    foundGoodPair = True

            newParent = sTree[toMerge[i]]
            kid1 = toMerge[i]
            kid2 = toMerge[i + 1]
            tree.kids[newParent, :] = [kid1, kid2]
            # set new parent to be possible merge candidate
            toMerge[i] = newParent
            # delete other kid
            toMerge = np.delete(toMerge, i + 1)

            c1 = tree.nodeFeatures[:, kid1]
            c2 = tree.nodeFeatures[:, kid2]

            p = np.tanh(np.dot(W1, c1) + np.dot(W2, c2) + b1.flatten())

            tree.nodeFeatures[:, newParent] = p

        vec = tree.nodeFeatures[:, toMerge[0]]  #the root node's composed vector
        vecFile.write(" ".join([str(x) for x in vec]) + '\n')

    vecFile.close()
    print "finished! "
Example #8
def train():
    SEED = 13742
    load_model = False
    custom_load = False
    np.random.seed(SEED)
    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl,
                                nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl,
                              nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()
    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    if (load_model):
        modelfile = config.saved_params_file + 'SGD_SLL300'
        rnn = MVRNNSLL.load(modelfile)
        print 'loaded model : ', modelfile
    elif (custom_load):
        modelfile = config.saved_params_file + 'SGD_SLL300'
        print "loading customized model..", modelfile
        #        d = 2#extra features for wordvectors
        #        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords) #Lm, as in paper
        #        Wo[:n,:] = np.ones((n,Wo.shape[1])) #Lm, as in paper
        #        Wcat = 0.005*np.random.randn(nLabels, fanIn) #Wlabel, as in paper
        #        Wv = 0.01*np.random.randn(n, nWords)
        #        WO = 0.01*np.random.randn(n, 2*n)
        #        W = 0.01*np.random.randn(n, 2*n+1)
        #load pre-trained weights here
        oldrnn = MVRNNSLL.load(modelfile)
        #        Wv[:-d,:] = oldrnn.Wv
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))),
                                'iob')
        rnn = MVRNNSLL(oldrnn.W, oldrnn.WO, oldrnn.Wcat, oldrnn.Wv, oldrnn.Wo,
                       Tran)
    else:
        #define theta
        #one vector for all the parameters of mvrnn model:  W, Wm, Wlabel, L, Lm
        #        n = params.wordSize; fanIn = params.fanIn; nWords = params.nWords; nLabels = params.categories; rank=params.rankWo
        #        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords) #Lm, as in paper
        #        Wo[:n,:] = np.ones((n,Wo.shape[1])) #Lm, as in paper
        #        Wcat = 0.005*np.random.randn(nLabels, fanIn) #Wlabel, as in paper
        #        #load pre-trained weights here
        ##        mats = sio.loadmat(config.saved_params_file)
        #        oldrnn = MVRNNSLL.load(modelfile)
        #        Wv = oldrnn.Wv  #L, as in paper
        #        W = oldrnn.W #W, as in paper
        #        WO = oldrnn.WO #Wm, as in paper
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))),
                                'iob')
        rnn = MVRNNSLL(W, WO, Wcat, Wv, Wo, Tran)


    #    rnn = MVRNNSLL(W, WO, Wcat, Wv, Wo, Tran)
    [_, _, all_train_idx] = getRelevantWords(
        rnnData_train, rnn.Wv, rnn.Wo, params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter,
                                          learning_rate=0.01,
                                          learningrateFactor=1.0,
                                          printAt10Iter='.',
                                          printAt100Iter='\n+')

    optimizer.minimizeBatches(rnn=rnn,
                              rnnData_train=rnnData_train,
                              allTrainSentIdx=all_train_idx,
                              params=params,
                              x0=theta,
                              func=costFn,
                              fprime=None,
                              rnnData_test=rnnData_dev,
                              initialSetSize=1,
                              niter=nIter,
                              seed=SEED,
                              modelFileName=config.saved_params_file +
                              'SGD_SLL',
                              printStatistics=True,
                              modelSaveIter=100,
                              nIterInPart=1,
                              nFetch=-1,
                              rnd=None,
                              nodeid=-1)
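
# init_transitions comes from the surrounding codebase; below is a plausible
# sketch of an IOB transition matrix (0 = allowed, -inf = forbidden), where
# I-X may only follow B-X or I-X of the same type. Tag names are assumptions.
import numpy as np

def iob_transitions(tag2idx):
    nTags = len(tag2idx)
    Tran = np.zeros((nTags, nTags))
    for prev, i in tag2idx.items():
        for curr, j in tag2idx.items():
            if curr.startswith('I-') and prev[2:] != curr[2:]:
                Tran[i, j] = -np.inf  #I-X must follow B-X or I-X of the same type
    return Tran

Tran = iob_transitions({'O': 0, 'B-A0': 1, 'I-A0': 2, 'B-A1': 3, 'I-A1': 4})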
Example #9
    def test_load_srl_data(self):
        rnndata = RNNDataCorpus()
        rnndata.load_data_srl(config.test_data_srl, nExamples=-1)
Example #10
def train():
    SEED = 131742
    load_model = False
    custom_load = True  #initialize from a previously saved model, except Wcat
    np.random.seed(SEED)
    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl,
                                nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl,
                              nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()
    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    if (load_model):
        with open(config.saved_params_file + "_45", 'r') as loadfile:
            rnn = cPickle.load(loadfile)
    elif (custom_load):
        d = 2  #extra features for wordvectors
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        #load pre-trained weights here
        with open(config.saved_params_file + '_65', 'rb') as loadfile:
            oldrnn = cPickle.load(loadfile)
        Wv[:-d, :] = oldrnn.Wv
        #        WO[:-d,:] = oldrnn.WO
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)
    else:
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)

    [_, _, all_train_idx] = getRelevantWords(
        rnnData_train, rnn.Wv, rnn.Wo, params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter,
                                          learning_rate=0.01,
                                          learningrateFactor=1.0,
                                          printAt10Iter='.',
                                          printAt100Iter='\n+')

    optimizer.minimizeBatches(rnn=rnn,
                              rnnData_train=rnnData_train,
                              allTrainSentIdx=all_train_idx,
                              params=params,
                              x0=theta,
                              func=costFn,
                              fprime=None,
                              rnnData_test=rnnData_dev,
                              initialSetSize=1,
                              niter=nIter,
                              seed=17,
                              modelFileName=config.saved_params_file +
                              'SGD_SRL',
                              printStatistics=True,
                              modelSaveIter=1,
                              nIterInPart=1,
                              nFetch=-1,
                              rnd=None,
                              nodeid=-1)

    print "Finished training! "