def train(self):
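     """Run the training loop with per-epoch validation.

     Each epoch streams the training set word by word, doing one forward and
     one backpropagation step per word, then scores the validation set in
     base-2 log probability. Once the validation score fails to improve by
     the min_improvement factor, the learning rate is halved on every
     following epoch; a second insufficient improvement stops training.
     """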
     for e in range(self.starting_epoch, self.max_epochs):
         # training phase
         self.__reset_context()
         self.training_set.seek(0)
         previous_word = 0
         for current_word in self.training_set:
             current_word = int(current_word)
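             # forward pass from the previous word and the recurrent context, then backpropagate the error for the observed current word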
             self.__feedforward(previous_word)
             self.__backpropagate(previous_word, current_word)
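             # carry the hidden layer over as the recurrent context for the next step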
             self.neu_context = np.copy(self.neu_hidden)
             previous_word = current_word
             # reset context at the end of each sentence
             if self.reset_context_each_sentence and current_word == 0:
                 self.__reset_context()
         # validation phase
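         # forward passes only: the validation set is scored without weight updates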
         self.__reset_context()
         self.validation_set.seek(0)
         previous_word = 0
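         # accumulate the base-2 log likelihood of in-vocabulary validation words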
         logp = 0
         word_counter = 0
         for current_word in self.validation_set:
             current_word = int(current_word)
             self.__feedforward(previous_word)
             # words mapped to -1 are out of vocabulary and are skipped
             if current_word != -1:
                 word_counter += 1
                 logp += np.log2(self.neu_output[current_word])
             self.neu_context = np.copy(self.neu_hidden)
             previous_word = current_word
             # reset context at the end of each sentence
             if self.reset_context_each_sentence and current_word == 0:
                 self.__reset_context()
         # print progress
         print("*******************")
         print("{}".format(time.strftime("%Y-%m-%d %H:%M:%S")))
         print("Epoch {}".format(e))
         print("Learning rate {}".format(self.learning_rate))
         print("Validation log probability {}".format(logp))
         print("Validation words counter {}".format(word_counter))
         if word_counter > 0:
             print("Validation PPL {}".format(np.power(2.0, -logp / word_counter)))
         # check improvement
         if logp < self.logp_previous:
             self.__restore_weights()
         else:
             self.__save_weights()
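         # once the improvement falls below the min_improvement factor, start halving the learning rate; stop the second time it happens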
         if logp * self.min_improvement < self.logp_previous:
             if not self.learning_rate_divide:
                 self.learning_rate_divide = True
             else:
                 break
         if self.learning_rate_divide:
             self.learning_rate /= 2
         self.logp_previous = logp
         # log last epoch for recovery
         cu.log_current_epoch(e+1, self.learning_rate, self.logp_previous, self.learning_rate_divide, self.recovery_config_file)