def testloadData(self):
    nExamples = 15
    train_data = '/home/bhanu/Downloads/relationClassification/dataCamera/allDataTrain.mat'
    pre_trained_weights = '/home/bhanu/Downloads/relationClassification/dataCamera/pretrainedWeights.mat'
    rnnData = RNNDataCorpus()
    rnnData.load_data(load_file=train_data, nExamples=nExamples)
    params = Params(data=rnnData, wordSize=50, rankWo=3)

#    n = params.wordSize; fanIn = params.fanIn; nWords = params.nWords; nLabels = params.categories; rank = params.rankWo
#    theta = np.random.normal(loc=0.0, scale=np.math.pow(10, -5)**2,
#                             size=n*(2*n+1) + n*2*n + nLabels*fanIn + n*nWords + (2*n*rank+n)*nWords)
#
#    #init respective parameters with prior values
#    W, WO, Wcat, Wv, Wo = unroll_theta(theta, params)
#    Wo[:n, :] = np.ones((n, Wo.shape[1]))  #init word matrices with Identity matrices + epsilon

    #load pre-trained weights here
    mats = sio.loadmat(pre_trained_weights)
    Wv = mats.get('Wv')
    W = mats.get('W')
    WO = mats.get('WO')
    #allocate Wo as a matrix with (n + 2*n*rank) rows and one column per word,
    #matching the Wo layout used in the training scripts below
    Wo = np.empty(shape=(2 * params.wordSize * params.rankWo + params.wordSize, params.nWords),
                  dtype='float64')

    sentencesIdx = np.arange(nExamples)
    [allSNum_batch, allSNN_batch, Wv_batch, Wo_batch, allWordInds] = getRelevantWords(
        rnnData, sentencesIdx, Wv, Wo, params)
    print

def train():
    SEED = 131742
    load_model = False
    custom_load = True  #loads model from previously saved model except Wcat
    np.random.seed(SEED)

    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl, nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl, nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()

    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo

    if load_model:
        with open(config.saved_params_file + "_45", 'r') as loadfile:
            rnn = cPickle.load(loadfile)
    elif custom_load:
        d = 2  #extra features for wordvectors
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        #load pre-trained weights here
        with open(config.saved_params_file + '_65', 'r') as loadfile:
            oldrnn = cPickle.load(loadfile)
        Wv[:-d, :] = oldrnn.Wv
#        WO[:-d, :] = oldrnn.WO
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)
    else:
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)

    [_, _, all_train_idx] = getRelevantWords(rnnData_train, rnn.Wv, rnn.Wo, params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter, learning_rate=0.01, learningrateFactor=1.0,
                                          printAt10Iter='.', printAt100Iter='\n+')
    optimizer.minimizeBatches(rnn=rnn, rnnData_train=rnnData_train, allTrainSentIdx=all_train_idx,
                              params=params, x0=theta, func=costFn, fprime=None,
                              rnnData_test=rnnData_dev, initialSetSize=1, niter=nIter, seed=17,
                              modelFileName=config.saved_params_file + 'SGD_SRL',
                              printStatistics=True, modelSaveIter=1, nIterInPart=1,
                              nFetch=-1, rnd=None, nodeid=-1)
    print "Finished training! "

def initMVRNN(self):
    print "Node: ", self.index, " Loading training and dev sets.."
    self.rnnData_train = RNNDataCorpus()
    self.rnnData_train.load_data_srl(config.train_data_srl, nExamples=self.nuse)
    self.rnnData_dev = RNNDataCorpus()
    self.rnnData_dev.load_data_srl(config.dev_data_srl, nExamples=self.nuse)

    modelfilename = config.saved_params_file + 'SGD_SRLiter305'
    print "Node: ", self.index, " Loading model: ", modelfilename
#    mats = sio.loadmat(config.saved_params_file + 'iter120.mat')
#    Wv = mats.get('Wv')    #L, as in paper
#    W = mats.get('W')      #W, as in paper
#    WO = mats.get('WO')    #Wm, as in paper
#    Wo = mats.get('Wo')
#    Wcat = mats.get('Wcat')
#    n = Wv.shape[0]
#    r = (Wo.shape[0] - n)/(2*n)
    with open(modelfilename, 'r') as loadfile:
        self.rnn = cPickle.load(loadfile)  #MVRNN(W, WO, Wcat, Wv, Wo)
    n = self.rnn.Wv.shape[0]
    r = (self.rnn.Wo.shape[0] - n) / (2 * n)  #Wo has n + 2*n*r rows, so this recovers the rank

    print "Node: ", self.index, "initializing params.."
    self.params = Params(data=self.rnnData_train, wordSize=n, rankWo=r)

#    #to be removed
#    Wcat = 0.005*np.random.randn(self.params.categories, self.params.fanIn)
#    self.rnn.Wcat = Wcat

    if self.index == 0:
        print "Master## Total trees in training set: ", self.rnnData_train.ndoc()
        print "Master## nFetch: ", self.nFetch
    if self.index != 0:
        self.rnnData_train = self.get_subset(self.rnnData_train, self.index)
        self.rnnData_dev = None  #workers don't need the dev set, so release it to free memory

    [_, _, self.all_train_idx] = getRelevantWords(self.rnnData_train, self.rnn.Wv, self.rnn.Wo,
                                                  self.params)  #sets nWords_reduced, returns new arrays

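# The worker branch above hands each non-master node its own slice of the training
# corpus via self.get_subset.  A minimal sketch of what such a helper could look
# like, reusing only the RNNDataCorpus API seen in these scripts (ndoc and
# copy_into_minibatch); the real get_subset may partition differently (e.g. read
# the worker count from config), so treat this as an illustration only.
def get_subset_sketch(rnnData, nodeIndex, nWorkers):
    #round-robin assignment: worker k (1-based) gets trees k-1, k-1+nWorkers, ...
    myIdx = np.arange(nodeIndex - 1, rnnData.ndoc(), nWorkers)
    subset = RNNDataCorpus()
    rnnData.copy_into_minibatch(subset, myIdx)
    return subset
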
def train():
    np.random.seed(131742)

    #get sentences, trees and labels
    nExamples = -1
    print "loading data.."
    rnnData = RNNDataCorpus()
    rnnData.load_data(load_file=config.train_data, nExamples=nExamples)

    #initialize params
    print "initializing params"
    params = Params(data=rnnData, wordSize=50, rankWo=2)

    #define theta
    #one vector for all the parameters of mvrnn model: W, Wm, Wlabel, L, Lm
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
    Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
    Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
#    Wv = 0.01*np.random.randn(n, nWords)
#    WO = 0.01*np.random.randn(n, 2*n)
#    W = 0.01*np.random.randn(n, 2*n+1)

    #load pre-trained weights here
    mats = sio.loadmat(config.pre_trained_weights)
    Wv = mats.get('Wv')  #L, as in paper
    W = mats.get('W')    #W, as in paper
    WO = mats.get('WO')  #Wm, as in paper

    sentencesIdx = np.arange(rnnData.ndoc())
    np.random.shuffle(sentencesIdx)
    nTrain = 4 * len(sentencesIdx) / 5
    trainSentIdx = sentencesIdx[0:nTrain]
    testSentIdx = sentencesIdx[nTrain:]
    batchSize = 5
    nBatches = len(trainSentIdx) / batchSize
    evalFreq = 5  #evaluate after every 5 minibatches
    nTestSentEval = 50  #number of test sentences to be evaluated
    rnnData_train = RNNDataCorpus()
    rnnData.copy_into_minibatch(rnnData_train, trainSentIdx)

    rnnData_test = RNNDataCorpus()
    if len(testSentIdx) > nTestSentEval:
#        np.random.shuffle(testSentIdx)  #choose random test examples
        thisTestSentIdx = testSentIdx[:nTestSentEval]
    else:
        thisTestSentIdx = testSentIdx
    rnnData.copy_into_minibatch(rnnData_test, thisTestSentIdx)

#    [Wv_test, Wo_test, _] = getRelevantWords(rnnData_test, Wv, Wo, params)
    [Wv_trainTest, Wo_trainTest, all_train_idx] = getRelevantWords(rnnData, Wv, Wo, params)  #sets nWords_reduced, returns new arrays
    theta = np.concatenate((W.flatten(), WO.flatten(), Wcat.flatten(),
                            Wv_trainTest.flatten(), Wo_trainTest.flatten()))

    #optimize
    print "starting training..."
    nIter = 100
    rnnData_minibatch = RNNDataCorpus()
    for i in range(nIter):
        #train in minibatches
#        ftrain = np.zeros(nBatches)
#        for ibatch in range(nBatches):
#            set_minibatch(rnnData, rnnData_minibatch, ibatch, nBatches, trainSentIdx)
#            print 'Iteration: ', i, ' minibatch: ', ibatch
        tunedTheta, fbatch_train, _ = lbfgsb.fmin_l_bfgs_b(func=costFn, x0=theta, fprime=None,
                                                           args=(rnnData_train, params),
                                                           approx_grad=0, bounds=None, m=5,
                                                           factr=1e15, pgtol=1e-05, epsilon=1e-08,
                                                           iprint=3, maxfun=1, disp=0)

        #map parameters back
        W[:, :], WO[:, :], Wcat[:, :], Wv_trainTest, Wo_trainTest = unroll_theta(tunedTheta, params)
        Wv[:, all_train_idx] = Wv_trainTest
        Wo[:, all_train_idx] = Wo_trainTest
#        ftrain[ibatch] = fbatch_train
        theta = tunedTheta  #for next iteration

        print "========================================"
        print "XXXXXXIteration ", i,
        print "Average cost: ", np.average(fbatch_train)
        evaluate(Wv, Wo, W, WO, Wcat, params, rnnData_test)
        print "========================================"

        #save weights
        save_dict = {'Wv': Wv, 'Wo': Wo, 'Wcat': Wcat, 'W': W, 'WO': WO}
        sio.savemat(config.saved_params_file + '_lbfgs_iter' + str(i), mdict=save_dict)
        print "saved tuned theta. "

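# costFn and fmin_l_bfgs_b above see the model only as the flat vector theta, built by
# concatenating W, WO, Wcat and the reduced Wv/Wo in that order; after each optimizer call
# unroll_theta maps the tuned vector back onto the matrices.  A minimal sketch of that
# inverse mapping, assuming the same ordering and the reduced-vocabulary shapes produced by
# getRelevantWords (the argument names here are illustrative, not the project's actual
# params fields):
def unroll_theta_sketch(theta, n, fanIn, nLabels, nWordsReduced, rank):
    shapes = [(n, 2 * n + 1),                      #W
              (n, 2 * n),                          #WO   (Wm in the paper)
              (nLabels, fanIn),                    #Wcat (Wlabel in the paper)
              (n, nWordsReduced),                  #Wv   (L in the paper)
              (n + 2 * n * rank, nWordsReduced)]   #Wo   (Lm in the paper)
    mats, offset = [], 0
    for rows, cols in shapes:
        mats.append(theta[offset:offset + rows * cols].reshape(rows, cols))
        offset += rows * cols
    return mats  #[W, WO, Wcat, Wv_reduced, Wo_reduced]
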
def train():
    SEED = 13742
    load_model = False
    custom_load = False
    np.random.seed(SEED)

    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl, nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl, nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()

    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo

    if load_model:
        modelfile = config.saved_params_file + 'SGD_SLL300'
        rnn = MVRNNSLL.load(modelfile)
        print 'loaded model : ', modelfile
    elif custom_load:
        modelfile = config.saved_params_file + 'SGD_SLL300'
        print "loading customized model..", modelfile
#        d = 2  #extra features for wordvectors
#        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords)  #Lm, as in paper
#        Wo[:n,:] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
#        Wcat = 0.005*np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
#        Wv = 0.01*np.random.randn(n, nWords)
#        WO = 0.01*np.random.randn(n, 2*n)
#        W = 0.01*np.random.randn(n, 2*n+1)
        #load pre-trained weights here
        oldrnn = MVRNNSLL.load(modelfile)
#        Wv[:-d,:] = oldrnn.Wv
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))), 'iob')
        rnn = MVRNNSLL(oldrnn.W, oldrnn.WO, oldrnn.Wcat, oldrnn.Wv, oldrnn.Wo, Tran)
    else:
        #define theta
        #one vector for all the parameters of mvrnn model: W, Wm, Wlabel, L, Lm
#        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords)  #Lm, as in paper
#        Wo[:n,:] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
#        Wcat = 0.005*np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
#        #load pre-trained weights here
#        oldrnn = MVRNNSLL.load(modelfile)
#        Wv = oldrnn.Wv  #L, as in paper
#        W = oldrnn.W    #W, as in paper
#        WO = oldrnn.WO  #Wm, as in paper
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))), 'iob')
        rnn = MVRNNSLL(W, WO, Wcat, Wv, Wo, Tran)

    [_, _, all_train_idx] = getRelevantWords(rnnData_train, rnn.Wv, rnn.Wo, params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter, learning_rate=0.01, learningrateFactor=1.0,
                                          printAt10Iter='.', printAt100Iter='\n+')
    optimizer.minimizeBatches(rnn=rnn, rnnData_train=rnnData_train, allTrainSentIdx=all_train_idx,
                              params=params, x0=theta, func=costFn, fprime=None,
                              rnnData_test=rnnData_dev, initialSetSize=1, niter=nIter, seed=SEED,
                              modelFileName=config.saved_params_file + 'SGD_SLL',
                              printStatistics=True, modelSaveIter=100, nIterInPart=1,
                              nFetch=-1, rnd=None, nodeid=-1)

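# The MVRNNSLL variant above adds a transition-score matrix Tran (sentence-level
# log-likelihood, Collobert-style) built by init_transitions from the label-index map and
# the 'iob' scheme.  A minimal sketch of one plausible initializer, assuming labels are
# named like 'O', 'B-X', 'I-X'; the real init_transitions may use different scores or a
# different matrix layout, so this is an illustration only.
def init_transitions_sketch(labelIdx, scheme='iob', disallowed=-1000.0):
    nLabels = len(labelIdx)
    Tran = np.zeros((nLabels + 1, nLabels))  #row 0: start scores; row i+1: from label i
    for toLabel, j in labelIdx.items():
        if not toLabel.startswith('I-'):
            continue
        chunk = toLabel[2:]
        Tran[0, j] = disallowed  #a sentence cannot start inside a chunk
        for fromLabel, i in labelIdx.items():
            #under IOB, I-X may only follow B-X or I-X
            if fromLabel not in ('B-' + chunk, 'I-' + chunk):
                Tran[i + 1, j] = disallowed
    return Tran
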