Example #1
0
    def startWorkerNode(self):
        '''Receives updated parameters from the master, keeps updating local parameters using its own subset of training data, pushes local parameters to 
            master, all asynchronously  '''        
        
        
        self.initMVRNN()  #also initializes training and test sets
        self.func = costFn
        self.fprime = None        
        self.optimizer = StochasticGradientDescent(niter=self.nIter , learning_rate=0.01, learningrateFactor=1.0, printAt10Iter='.', printAt100Iter='\n+')
        genObj = None   
        while True:
                #fetch action and parameters from masters
                action, master_rnn = self.pop('fromMaster_' + str(self.index))
                if(action == "finish"):
                    print "Node: ",self.index," Received finish action from master, exiting..."
                    return
                if self.debug:
                    print "Node: ", self.index, " Fetched new action & parameters from Master.. " + str(master_rnn.W[0,0])
                
                self.params.resetNumReducedWords() #to unroll all the words
#                W, WO, Wcat, Wv, Wo = unroll_theta(theta, self.params)
                self.copy_into_rnn(master_rnn.getParamsList())  #copy to local rnn to global                
                theta = self.rnn.getTheta(self.params, self.all_train_idx)
                self.params.setNumReducedWords(len(self.all_train_idx))  #set number of reduced words, to be used by costfn for unrolling
                start_time = time.clock()                
                
                try:
                    genObj.next()
                    
                except (AttributeError, StopIteration) :
                    
                    genObj =  self.optimizer.minimizeBatchesPll(rnn=self.rnn, rnnData_train=self.rnnData_train, allTrainSentIdx=self.all_train_idx, 
                                                             params=self.params, x0=theta, func=costFn, fprime=None, 
                                                             rnnData_test=self.rnnData_dev, initialSetSize=1, niter=1, seed=self.seed,
                                                             modelFileName='', printStatistics=False, modelSaveIter=10, nIterInPart=1,  
                                                             nFetch=self.nFetch, rnd=self.rnd, nodeid=self.index)  #optimize this theta and save it in self.rnn 
                except:
                    raise
                end_time = time.clock()                
                
#                [W, WO, Wcat, Wv, Wo] = self.rnn 
#                theta = np.concatenate((W.flatten(), WO.flatten(), Wcat.flatten(), Wv.flatten(), Wo.flatten()))  # current local optimal theta

                #push local theta to master                
                self.push('toMaster_' + str(self.index), (self.rnn, None)) #push the theta value #pushing a tuple object #playdoh bug
                if self.debug:
                    print "Node:", self.index, " Execution time for ", self.nFetch, " minibatches: ", (end_time - start_time)/60, 'minutes'
                    print "Node:", self.index, " Pushed local parameters to Master.."
Example #2
0
def train():
    SEED = 13742
    load_model = False
    custom_load = False
    np.random.seed(SEED)
    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl,
                                nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl,
                              nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()
    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    if (load_model):
        modelfile = config.saved_params_file + 'SGD_SLL300'
        rnn = MVRNNSLL.load(modelfile)
        print 'loaded model : ', modelfile
    elif (custom_load):
        modelfile = config.saved_params_file + 'SGD_SLL300'
        print "loading customized model..", modelfile
        #        d = 2#extra features for wordvectors
        #        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords) #Lm, as in paper
        #        Wo[:n,:] = np.ones((n,Wo.shape[1])) #Lm, as in paper
        #        Wcat = 0.005*np.random.randn(nLabels, fanIn) #Wlabel, as in paper
        #        Wv = 0.01*np.random.randn(n, nWords)
        #        WO = 0.01*np.random.randn(n, 2*n)
        #        W = 0.01*np.random.randn(n, 2*n+1)
        #load pre-trained weights here
        oldrnn = MVRNNSLL.load(modelfile)
        #        Wv[:-d,:] = oldrnn.Wv
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))),
                                'iob')
        rnn = MVRNNSLL(oldrnn.W, oldrnn.WO, oldrnn.Wcat, oldrnn.Wv, oldrnn.Wo,
                       Tran)
    else:
        #define theta
        #one vector for all the parameters of mvrnn model:  W, Wm, Wlabel, L, Lm
        #        n = params.wordSize; fanIn = params.fanIn; nWords = params.nWords; nLabels = params.categories; rank=params.rankWo
        #        Wo = 0.01*np.random.randn(n + 2*n*rank, nWords) #Lm, as in paper
        #        Wo[:n,:] = np.ones((n,Wo.shape[1])) #Lm, as in paper
        #        Wcat = 0.005*np.random.randn(nLabels, fanIn) #Wlabel, as in paper
        #        #load pre-trained weights here
        ##        mats = sio.loadmat(config.saved_params_file)
        #        oldrnn = MVRNNSLL.load(modelfile)
        #        Wv = oldrnn.Wv  #L, as in paper
        #        W = oldrnn..get('W') #W, as in paper
        #        WO = mats.get('WO') #Wm, as in paper
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        categories = [x.strip() for x in rnnData_train.categories]
        Tran = init_transitions(dict(zip(categories, range(len(categories)))),
                                'iob')
        rnn = MVRNNSLL(W, WO, Wcat, Wv, Wo, Tran)


#    rnn = MVRNNSLL(W, WO, Wcat, Wv, Wo, Tran)
    [_, _, all_train_idx
     ] = getRelevantWords(rnnData_train, rnn.Wv, rnn.Wo,
                          params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter,
                                          learning_rate=0.01,
                                          learningrateFactor=1.0,
                                          printAt10Iter='.',
                                          printAt100Iter='\n+')

    optimizer.minimizeBatches(rnn=rnn,
                              rnnData_train=rnnData_train,
                              allTrainSentIdx=all_train_idx,
                              params=params,
                              x0=theta,
                              func=costFn,
                              fprime=None,
                              rnnData_test=rnnData_dev,
                              initialSetSize=1,
                              niter=nIter,
                              seed=SEED,
                              modelFileName=config.saved_params_file +
                              'SGD_SLL',
                              printStatistics=True,
                              modelSaveIter=100,
                              nIterInPart=1,
                              nFetch=-1,
                              rnd=None,
                              nodeid=-1)
Example #3
0
def train():
    SEED = 131742
    load_model = False
    custom_load = True  #loads model from previously saved model except Wcat
    np.random.seed(SEED)
    #get sentences, trees and labels
    nExamples = 5
    print "loading data.."
    rnnData_train = RNNDataCorpus()
    rnnData_train.load_data_srl(load_file=config.train_data_srl,
                                nExamples=nExamples)
    rnnData_dev = RNNDataCorpus()
    rnnData_dev.load_data_srl(load_file=config.dev_data_srl,
                              nExamples=nExamples)
    print "Number of sentences loaded in training data: ", rnnData_train.ndoc()
    #initialize params
    print "initializing params"
    params = Params(data=rnnData_train, wordSize=52, rankWo=2)
    n = params.wordSize
    fanIn = params.fanIn
    nWords = params.nWords
    nLabels = params.categories
    rank = params.rankWo
    if (load_model):
        with open(config.saved_params_file + "_45", 'r') as loadfile:
            rnn = cPickle.load(loadfile)
    elif (custom_load):
        d = 2  #extra features for wordvectors
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        #load pre-trained weights here
        with open(config.saved_params_file + '_65', 'r') as loadfile:
            oldrnn = cPickle.load(loadfile)
        Wv[:-d, :] = oldrnn.Wv
        #        WO[:-d,:] = oldrnn.WO
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)
    else:
        Wo = 0.01 * np.random.randn(n + 2 * n * rank, nWords)  #Lm, as in paper
        Wo[:n, :] = np.ones((n, Wo.shape[1]))  #Lm, as in paper
        Wcat = 0.005 * np.random.randn(nLabels, fanIn)  #Wlabel, as in paper
        Wv = 0.01 * np.random.randn(n, nWords)
        WO = 0.01 * np.random.randn(n, 2 * n)
        W = 0.01 * np.random.randn(n, 2 * n + 1)
        rnn = MVRNN(W, WO, Wcat, Wv, Wo)

    [_, _, all_train_idx
     ] = getRelevantWords(rnnData_train, rnn.Wv, rnn.Wo,
                          params)  #sets nWords_reduced, returns new arrays
    params.setNumReducedWords(len(all_train_idx))
    theta = rnn.getTheta(params, all_train_idx)

    #optimize
    print "starting training using SGD..."
    nIter = 500
    optimizer = StochasticGradientDescent(niter=nIter,
                                          learning_rate=0.01,
                                          learningrateFactor=1.0,
                                          printAt10Iter='.',
                                          printAt100Iter='\n+')

    optimizer.minimizeBatches(rnn=rnn,
                              rnnData_train=rnnData_train,
                              allTrainSentIdx=all_train_idx,
                              params=params,
                              x0=theta,
                              func=costFn,
                              fprime=None,
                              rnnData_test=rnnData_dev,
                              initialSetSize=1,
                              niter=nIter,
                              seed=17,
                              modelFileName=config.saved_params_file +
                              'SGD_SRL',
                              printStatistics=True,
                              modelSaveIter=1,
                              nIterInPart=1,
                              nFetch=-1,
                              rnd=None,
                              nodeid=-1)

    print "Finished training! "