Example #1
    def __create_model__(self):
        """Method to create the model.
        """

        #Get the number of terms.
        numTerms = self.__get_num_terms__()

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 3,
                                self.batchSize_, numTerms, self.aminoInput_)

        #Create the placeholders.
        if self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(0)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(3)

        #Create the model.
        self.model_.create_model(self.epochStep_, self.numEpochs_)

        #Create the loss function.
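        # When class balancing is enabled, per-term weights are fed through a
        # placeholder at run time so the loss can be reweighted per class.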
        if self.balance_:
            self.lossWeights_ = tf.placeholder(tf.float32, [numTerms])
            self.loss_ = self.model_.create_loss(self.lossWeights_)
        else:
            self.loss_ = self.model_.create_loss()

        #Create accuracy
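        # argmax over the logits and the (one-hot) labels gives predicted and
        # true class indices; tf.equal marks the correctly classified samples.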
        correct = tf.equal(tf.argmax(self.model_.logits_, 1),
                           tf.argmax(self.model_.labelsPH_, 1))
        self.accuracy_ = (tf.reduce_sum(tf.cast(correct, tf.float32)) /
                          float(self.batchSize_)) * 100.0
        self.accuracyTest_ = tf.cast(correct, tf.float32) * 100.0
Example #2
    def __create_model__(self):
        """Method to create the model.
        """

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 
                3, self.batchSize_, 1, self.aminoInput_)
        
        #Create the placeholders.
        if not self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(3)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(0)

        #Create the model.
        self.model_.create_model(self.epochStep_, self.numEpochs_)

        #Create the loss function.        
        self.loss_ = self.model_.create_loss()
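        # A single sigmoid output makes this a binary classifier: probabilities
        # above 0.5 are counted as the positive class below.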
        self.probs_ = tf.nn.sigmoid(self.model_.logits_)

        #Create accuracy
        predictions = tf.cast(tf.greater(self.probs_, 0.5), tf.int32)
        labels = tf.cast(self.model_.labelsPH_, tf.int32)
        correct = tf.equal(predictions, labels)
        self.accuracy_ = (tf.reduce_sum(tf.cast(correct, tf.float32)) / 
            float(self.batchSize_))*100.0
Example #3
    def __create_model__(self):
        """Method to create the model.
        """

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 3,
                                self.batchSize_, 1195, self.aminoInput_)

        #Create the placeholders.
        if not self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(3)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(0)

        #Create the model.
        self.model_.create_model(0, 1)
Example #4
    def __create_model__(self):
        """Method to create the model.
        """

        #Get the number of terms.
        numTerms = self.__get_num_terms__()

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 3,
                                self.batchSize_, numTerms, self.aminoInput_)

        #Create the placeholders.
        if not self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(3)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(0)

        #Create the model.
        self.model_.create_model(0, 1)
Example #5
class ProtClassTrainLoop(TrainLoop):
    """Class to train a classification network on the protclass100 dataset.
    """
    
    def __init__(self, pConfigFile):
        """Constructor.

        Args:
            pConfigFile (string): Path to the configuration file.
        """

        #Load the configuration file.
        self.config_ = configparser.ConfigParser()
        self.config_.read(pConfigFile)

        #Load the parameters.
        trainConfigDict = self.config_._sections['ProteinsDB']
        self.batchSize_ = int(trainConfigDict['batchsize'])
        self.augment_ = trainConfigDict['augment'] == "True"
        self.maxModelsSaved_ = int(trainConfigDict['maxmodelssaved'])
        self.aminoInput_ = trainConfigDict['aminoinput'] == "True"
        self.foldId_ = trainConfigDict['foldid']

        #Call the constructor of the parent.
        TrainLoop.__init__(self, self.config_._sections['TrainLoop'])

        #Save the config file in the log folder.
        os.system('cp %s %s' % (pConfigFile, self.logFolder_))

        #Initialize best accuracies.
        self.bestAccuracy_ = 0.0
        self.bestClassAccuracy_ = 0.0


    def __create_datasets__(self):
        """Method to create the datasets.
        """

        print("")
        print("########## Loading training dataset")
        self.trainDS_ = ProtClassProteinsDB(
            pDataset = "Training", 
            pPath = "../../Datasets/data/ProteinsDD/",
            pFoldId = self.foldId_,
            pAmino = self.aminoInput_,
            pLoadText = False)
        print(self.trainDS_.get_num_proteins(), "proteins loaded")

        print("")
        print("########## Loading test dataset")
        self.testDS_ = ProtClassProteinsDB(
            pDataset = "Validation", 
            pPath = "../../Datasets/data/ProteinsDD/",
            pFoldId = self.foldId_,
            pAmino = self.aminoInput_,
            pLoadText = False)  
        print(self.testDS_.get_num_proteins(), "proteins loaded")


    def __create_model__(self):
        """Method to create the model.
        """

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 
                3, self.batchSize_, 1, self.aminoInput_)
        
        #Create the placeholders.
        if not self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(3)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(0)

        #Create the model.
        self.model_.create_model(self.epochStep_, self.numEpochs_)

        #Create the loss function.        
        self.loss_ = self.model_.create_loss()
        self.probs_ = tf.nn.sigmoid(self.model_.logits_)

        #Create accuracy
        predictions = tf.cast(tf.greater(self.probs_, 0.5), tf.int32)
        labels = tf.cast(self.model_.labelsPH_, tf.int32)
        correct = tf.equal(predictions, labels)
        self.accuracy_ = (tf.reduce_sum(tf.cast(correct, tf.float32)) / 
            float(self.batchSize_))*100.0


    def __create_trainers__(self):
        """Method to create the trainer objects.
        """
        self.trainer_ = Trainer(self.config_._sections['Trainer'], 
            self.epochStep_, self.loss_, pCheckNans=True) 


    def __create_savers__(self):
        """Method to create the saver objects.
        """
        self.saver_ = tf.train.Saver(max_to_keep=self.maxModelsSaved_)


    def __create_tf_summaries__(self):
        """Method to create the tensorflow summaries.
        """

        self.accuracyPH_ = tf.placeholder(tf.float32)
        self.lossPH_ = tf.placeholder(tf.float32)

        #Train summaries.
        lossSummary = tf.summary.scalar('Loss', self.lossPH_)
        accuracySummary = tf.summary.scalar('Accuracy', self.accuracyPH_)
        lrSummary = tf.summary.scalar('LR', self.trainer_.learningRate_)
        self.trainingSummary_ = tf.summary.merge([lossSummary, 
            accuracySummary, lrSummary])
        
        #Test summaries.
        testLossSummary = tf.summary.scalar('Test_Loss', self.lossPH_)
        testAccuracySummary = tf.summary.scalar('Test_Accuracy', self.accuracyPH_)
        self.testSummary_ = tf.summary.merge([testLossSummary, testAccuracySummary])


    def __train_one_epoch__(self, pNumEpoch):
        """Private method to train one epoch.

        Args:
            pNumEpoch (int): Current epoch number.
        """

        #Calculate num batches.
        numBatchesTrain = self.trainDS_.get_num_proteins()//self.batchSize_

        #Init dataset epoch.
        self.trainDS_.start_epoch()

        #Process each batch.
        accumAccuracy = 0.0
        accumLoss = 0.0
        accumCounter = 1.0
        for curBatch in range(numBatchesTrain):

            #Get the starting time.
            startDataProcess = current_milli_time()

            #Get the batch data.
            protBatch, features, labels, _, _ = self.trainDS_.get_next_batch(
                self.batchSize_, self.augment_)

            #Create the dictionary for tensorflow.
            curDict = {}
            self.model_.associate_inputs_to_ph(curDict, protBatch, features, 
                np.array(labels).reshape((-1, 1)), True)

            #Get the end time of the pre-process.
            endDataProcess = current_milli_time()

            #Execute a training step.
            curAccuracy, curLoss, wl2Loss, _ = self.sess_.run(
                [self.accuracy_, self.loss_, 
                self.trainer_.weightLoss_,
                self.trainer_.trainOps_], curDict)
            
            #Get the end time of the computation.
            endComputation = current_milli_time()

            #Accumulate the accuracy and loss.
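            # Streaming mean: mean += (x - mean) / n, which keeps a running
            # average without storing the per-batch values.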
            accumAccuracy += (curAccuracy - accumAccuracy)/accumCounter
            accumLoss += (curLoss - accumLoss)/accumCounter
            accumCounter += 1.0

            #Visualize progress.
            if curBatch % 10 == 0 and curBatch > 0:
                visualize_progress(curBatch, numBatchesTrain, 
                    "Loss: %.6f (%.1f) | Accuracy: %.4f | (Data: %.2f ms | TF: %.2f ms) " %
                    (accumLoss, wl2Loss, accumAccuracy,  
                    endDataProcess-startDataProcess, 
                    endComputation-endDataProcess),
                    pSameLine = True)
        print()

        #Write the summary.
        trainSumm = self.sess_.run(self.trainingSummary_, 
            {self.accuracyPH_ : accumAccuracy, 
            self.lossPH_ : accumLoss})
        self.summaryWriter_.add_summary(trainSumm, pNumEpoch)

    
    def __test_one_epoch__(self, pNumEpoch):
        """Private method to test one epoch.

        Args:
            pNumEpoch (int): Current epoch number.
        """

        #Calculate num batches.
        numBatchesTest = self.testDS_.get_num_proteins()//self.batchSize_
        if self.testDS_.get_num_proteins()%self.batchSize_ != 0:
            numBatchesTest += 1

        #Init dataset epoch.
        self.testDS_.start_epoch()

        #Test the model.
        accumCounter = 1.0
        accumTestLoss = 0.0
        accumTestAccuracy = 0.0
        for curBatch in range(numBatchesTest):

            #Get the batch data.
            protBatch, features, labels, names, validProts = self.testDS_.get_next_batch(self.batchSize_)

            #Create the dictionary for tensorflow.
            curDict = {}
            self.model_.associate_inputs_to_ph(curDict, protBatch, features, 
                np.array(labels).reshape((-1,1)), False)

            #Execute a test step.
            curProbs, curLoss = self.sess_.run(
                [self.probs_, self.loss_], curDict)

            #Accumulate accuracy.
            for curModel in range(len(labels)):
                curPrediction = 0
                if curProbs[curModel] > 0.5:
                    curPrediction = 1
                
                curAccuracy = 0.0
                if curPrediction == labels[curModel]:
                    curAccuracy = 100.0
                accumTestAccuracy += (curAccuracy - accumTestAccuracy)/accumCounter
                accumCounter += 1
            accumTestLoss += curLoss

            if curBatch % 10 == 0 and curBatch > 0:
                visualize_progress(curBatch, numBatchesTest, pSameLine = True)

        #Print the result of the test.
        totalLoss = accumTestLoss/float(numBatchesTest) 
        print("End test:")
        print("Accuracy: %.4f [%.4f]" % (accumTestAccuracy, self.bestAccuracy_))
        print("Loss: %.6f" % (totalLoss))

        #Write the summary.
        testSumm = self.sess_.run(self.testSummary_, 
            {self.accuracyPH_ : accumTestAccuracy, 
            self.lossPH_ : totalLoss})
        self.summaryWriter_.add_summary(testSumm, pNumEpoch)

        #Save the model.
        self.saver_.save(self.sess_, self.logFolder_+"/model.ckpt")

        if accumTestAccuracy > self.bestAccuracy_:
            self.bestAccuracy_ = accumTestAccuracy
            self.saver_.save(self.sess_, 
                self.logFolder_+"/best.ckpt", 
                global_step=self.epochStep_)
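
A minimal usage sketch for this train loop (hypothetical: the config-file path
and the train() driver method inherited from TrainLoop are assumptions, since
the parent class is not shown in this example):

if __name__ == "__main__":
    # Build the loop from its configuration file and run it (assumed API).
    trainLoop = ProtClassTrainLoop("protclass_proteinsdb.ini")
    trainLoop.train()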
Example #6
class ProtFunctTestLoop(TestLoop):
    """Class to test a function prediction network.
    """
    def __init__(self, pConfigFile):
        """Constructor.

        Args:
            pConfigFile (string): Path to the configuration file.
        """

        #Load the configuration file.
        self.config_ = configparser.ConfigParser()
        self.config_.read(pConfigFile)

        #Load the parameters.
        trainConfigDict = self.config_._sections['ProtFunct']
        self.batchSize_ = int(trainConfigDict['batchsize'])
        self.augment_ = trainConfigDict['augment'] == "True"
        self.checkPointName_ = trainConfigDict['checkpointname']
        self.logFolder_ = trainConfigDict['logfolder']
        self.aminoInput_ = trainConfigDict['aminoinput'] == "True"
        self.topK_ = int(trainConfigDict['topk'])

        #Call the constructor of the parent.
        TestLoop.__init__(self, self.config_._sections['TestLoop'])

    def __create_datasets__(self):
        """Method to create the datasets.
        """

        print("")
        print("########## Loading test dataset")
        self.testDS_ = ProtFunctDataSet("Test",
                                        "../../Datasets/data/ProtFunct/",
                                        pAminoInput=self.aminoInput_,
                                        pRandSeed=33,
                                        pPermute=False,
                                        pLoadText=False)
        print(self.testDS_.get_num_proteins(), "proteins loaded")

        # Create the accumulative logits.
        self.accumLogits_ = np.full(
            (self.testDS_.get_num_proteins(), self.__get_num_terms__()),
            0.0,
            dtype=np.float64)

        #Declare the array to store the accuracy of each class each iteration.
        self.votesXClassAcc_ = np.full(
            (self.__get_num_terms__(), self.numVotes_), 0.0, dtype=np.float32)

    def __get_num_terms__(self):
        """Method to get the number of terms that are predicted.

        Returns:
            (int): Number of terms.
        """

        return self.testDS_.get_num_functions()

    def __create_model__(self):
        """Method to create the model.
        """

        #Get the number of terms.
        numTerms = self.__get_num_terms__()

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 3,
                                self.batchSize_, numTerms, self.aminoInput_)

        #Create the placeholders.
        if not self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(3)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(0)

        #Create the model.
        self.model_.create_model(0, 1)

    def __create_savers__(self):
        """Method to create the saver objects.
        """
        self.saver_ = tf.train.Saver()

    def __load_parameters__(self):
        """Method to load the parameters of a model.
        """

        #Restore the model
        self.saver_.restore(self.sess_,
                            self.logFolder_ + "/" + self.checkPointName_)

    def __test_one_voting__(self, pNumVote):
        """Private method to test on voting step.

        Args:
            pNumVote (int): Current vote number.
        """

        numClasses = self.__get_num_terms__()

        #Calculate num batches.
        numProteins = self.testDS_.get_num_proteins()
        numBatchesTest = numProteins // self.batchSize_

        # Check if the number of proteins is not multiple of the batch size.
        if numProteins % self.batchSize_ != 0:
            numBatchesTest += 1

        #Init dataset epoch.
        self.testDS_.start_epoch()

        #Test the model.
        accuracyCats = np.full((numClasses), 0.0, dtype=np.float64)
        numObjCats = np.full((numClasses), 0.0, dtype=np.float64)
        for curBatch in range(numBatchesTest):

            #Calculate the current batch size.
            curBatchSize = min(
                self.batchSize_,
                len(self.testDS_.data_) - self.testDS_.iterator_)

            #Get the batch data.
            protBatch, features, labels = self.testDS_.get_next_batch(
                curBatchSize, self.augment_)

            #Create the dictionary for tensorflow.
            curDict = {}
            self.model_.associate_inputs_to_ph(curDict, protBatch, features,
                                               labels, False)

            #Execute a test step.
            curLogits = self.sess_.run(self.model_.logits_, curDict)

            #Incremental average.
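            # accumLogits_ keeps a running mean of the logits over the votes
            # seen so far; each vote nudges it by (new - mean) / (vote + 1).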
            labels = np.argmax(labels, axis=-1)
            for curModel in range(curBatchSize):
                #Accumulate the logits.
                curId = curBatch * self.batchSize_ + curModel
                self.accumLogits_[curId] += (curLogits[curModel] - \
                    self.accumLogits_[curId])/float(pNumVote+1)

                #Compute accuracies.
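                # np.argpartition returns the indices of the topK_ largest
                # logits without a full sort; membership alone decides a
                # top-K hit.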
                maxIndexs = np.argpartition(curLogits[curModel],
                                            -self.topK_)[-self.topK_:]
                if labels[curModel] in maxIndexs:
                    self.votesXClassAcc_[labels[curModel], pNumVote] += 100.0

                maxIndexs = np.argpartition(self.accumLogits_[curId],
                                            -self.topK_)[-self.topK_:]
                if labels[curModel] in maxIndexs:
                    accuracyCats[labels[curModel]] += 100.0
                numObjCats[labels[curModel]] += 1.0

            if curBatch % 10 == 0 and curBatch > 0:
                visualize_progress(curBatch, numBatchesTest, pSameLine=True)

        #Print the result of the test.
        totalAccuracy = np.sum(accuracyCats) / float(numProteins)
        totalAccuracyNV = np.sum(
            self.votesXClassAcc_[:, pNumVote]) / float(numProteins)
        accuracyCatsNV = np.full((numClasses), 0.0, dtype=np.float64)
        for i in range(numClasses):
            accuracyCats[i] = accuracyCats[i] / numObjCats[i]
            accuracyCatsNV[i] = self.votesXClassAcc_[i,
                                                     pNumVote] / numObjCats[i]
        print()
        print("End test")
        print("NV -> Accuracy: %.4f | Per Class Accuracy: %.4f" %
              (totalAccuracyNV, np.mean(accuracyCatsNV)))
        print("V  -> Accuracy: %.4f | Per Class Accuracy: %.4f" %
              (totalAccuracy, np.mean(accuracyCats)))

    def __test_aggregation__(self):
        """Private method to aggregate the results of all votes.
        """

        numClasses = self.__get_num_terms__()

        #Compute the accuracy.
        numProteins = self.testDS_.get_num_proteins()
        accuracyCats = np.full((numClasses), 0.0, dtype=np.float64)
        numObjCats = np.full((numClasses), 0.0, dtype=np.float64)
        confMatrix = np.full((numClasses, numClasses), 0.0, dtype=np.float64)
        for curProtein in range(numProteins):
            curLabel = self.testDS_.dataFunctions_[curProtein]
            predLabel = np.argmax(self.accumLogits_[curProtein])
            maxIndexs = np.argpartition(self.accumLogits_[curProtein],
                                        -self.topK_)[-self.topK_:]
            if curLabel in maxIndexs:
                accuracyCats[curLabel] += 100.0
            confMatrix[curLabel, predLabel] += 1.0
            numObjCats[curLabel] += 1.0

        #Print the result of the test.
        totalAccuracy = np.sum(accuracyCats) / float(numProteins)
        totalAccuracyNV = np.mean(
            np.sum(self.votesXClassAcc_, axis=0) / float(numProteins))
        for i in range(numClasses):
            self.votesXClassAcc_[
                i, :] = self.votesXClassAcc_[i, :] / numObjCats[i]
            accuracyCats[i] = accuracyCats[i] / numObjCats[i]

        print("")
        for i in range(numClasses):
            print("Category %12s (%4d) ->  NV: %.4f | V: %.4f" %
                  (self.testDS_.functions_[i], int(numObjCats[i]),
                   np.mean(self.votesXClassAcc_[i, :]), accuracyCats[i]))
        print("")

        print("NV -> Accuracy: %.4f | Mean Class Accuracy: %.4f" %
              (totalAccuracyNV, np.mean(self.votesXClassAcc_)))
        print("V  -> Accuracy: %.4f | Mean Class Accuracy: %.4f" %
              (totalAccuracy, np.mean(accuracyCats)))

        #Save the confusion matrix.
        with open(
                self.logFolder_ + "/" + self.checkPointName_ +
                "_conf_matrix.txt", 'w') as confMatFile:
            for curClass in range(numClasses):
                for curPredClass in range(numClasses):
                    confMatFile.write(
                        str(confMatrix[curClass, curPredClass]) + ";")
                confMatFile.write("\n")

        #Save the accuracies of each vote.
        with open(
                self.logFolder_ + "/" + self.checkPointName_ + "_vote_acc.txt",
                'w') as voteAcc:
            for curVote in range(self.numVotes_):
                voteAcc.write(
                    str(np.mean(self.votesXClassAcc_[:, curVote])) + ";")
            voteAcc.write("\n")
Example #7
class ProtFunctTrainLoop(TrainLoop):
    """Class to train a classification network on the protfunct dataset.
    """
    def __init__(self, pConfigFile):
        """Constructor.

        Args:
            pConfigFile (string): Path to the configuration file.
        """

        #Load the configuration file.
        self.config_ = configparser.ConfigParser()
        self.config_.read(pConfigFile)

        #Load the parameters.
        trainConfigDict = self.config_._sections['ProtFunct']
        self.batchSize_ = int(trainConfigDict['batchsize'])
        self.augment_ = trainConfigDict['augment'] == "True"
        self.maxModelsSaved_ = int(trainConfigDict['maxmodelssaved'])
        self.balance_ = trainConfigDict['balance'] == "True"
        self.aminoInput_ = trainConfigDict['aminoinput'] == "True"

        #Call the constructor of the parent.
        TrainLoop.__init__(self, self.config_._sections['TrainLoop'])

        #Save the config file in the log folder.
        os.system('cp %s %s' % (pConfigFile, self.logFolder_))

        #Initialize metrics.
        self.bestAccuracy_ = 0.0
        self.bestClassAccuracy_ = 0.0

    def __create_datasets__(self):
        """Method to create the datasets.
        """

        print("")
        print("########## Loading training dataset")
        self.trainDS_ = ProtFunctDataSet("Training",
                                         "../../Datasets/data/ProtFunct/",
                                         pAminoInput=self.aminoInput_,
                                         pLoadText=False)
        print(self.trainDS_.get_num_proteins(), "proteins loaded")

        print("")
        print("########## Loading test dataset")
        self.testDS_ = ProtFunctDataSet("Validation",
                                        "../../Datasets/data/ProtFunct/",
                                        pAminoInput=self.aminoInput_,
                                        pLoadText=False)
        print(self.testDS_.get_num_proteins(), "proteins loaded")

    def __get_num_terms__(self):
        """Method to get the number of terms that are predicted.

        Returns:
            (int): Number of terms.
        """

        return self.trainDS_.get_num_functions()

    def __create_model__(self):
        """Method to create the model.
        """

        #Get the number of terms.
        numTerms = self.__get_num_terms__()

        #Create the model object.
        self.model_ = ProtClass(self.config_._sections['ProtClass'], 3,
                                self.batchSize_, numTerms, self.aminoInput_)

        #Create the placeholders.
        if self.aminoInput_:
            self.numInFeatures_ = self.model_.create_placeholders(0)
        else:
            self.numInFeatures_ = self.model_.create_placeholders(3)

        #Create the model.
        self.model_.create_model(self.epochStep_, self.numEpochs_)

        #Create the loss function.
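        # With balance enabled, the per-function weights computed on the
        # training set are fed through this placeholder at run time.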
        if self.balance_:
            self.lossWeights_ = tf.placeholder(tf.float32, [numTerms])
            self.loss_ = self.model_.create_loss(self.lossWeights_)
        else:
            self.loss_ = self.model_.create_loss()

        #Create accuracy
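        # accuracy_ averages over the batch for training; accuracyTest_ keeps
        # the per-example 0/100 values so test accuracy can be broken down
        # per class.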
        correct = tf.equal(tf.argmax(self.model_.logits_, 1),
                           tf.argmax(self.model_.labelsPH_, 1))
        self.accuracy_ = (tf.reduce_sum(tf.cast(correct, tf.float32)) /
                          float(self.batchSize_)) * 100.0
        self.accuracyTest_ = tf.cast(correct, tf.float32) * 100.0

    def __create_trainers__(self):
        """Method to create the trainer objects.
        """
        self.trainer_ = Trainer(self.config_._sections['Trainer'],
                                self.epochStep_,
                                self.loss_,
                                pCheckNans=False)

    def __create_savers__(self):
        """Method to create the saver objects.
        """
        self.saver_ = tf.train.Saver(max_to_keep=self.maxModelsSaved_)

    def __create_tf_summaries__(self):
        """Method to create the tensorflow summaries.
        """

        self.accuracyPH_ = tf.placeholder(tf.float32)
        self.lossPH_ = tf.placeholder(tf.float32)

        #Train summaries.
        lossSummary = tf.summary.scalar('Loss', self.lossPH_)
        accuracySummary = tf.summary.scalar('Accuracy', self.accuracyPH_)
        lrSummary = tf.summary.scalar('LR', self.trainer_.learningRate_)
        self.trainingSummary_ = tf.summary.merge(
            [lossSummary, accuracySummary, lrSummary])

        #Test summaries.
        testLossSummary = tf.summary.scalar('Test_Loss', self.lossPH_)
        testAccuracySummary = tf.summary.scalar('Test_Accuracy',
                                                self.accuracyPH_)
        self.testSummary_ = tf.summary.merge(
            [testLossSummary, testAccuracySummary])

    def __train_one_epoch__(self, pNumEpoch):
        """Private method to train one epoch.

        Args:
            pNumEpoch (int): Current epoch number.
        """

        #Calculate num batches.
        numBatchesTrain = self.trainDS_.get_num_proteins() // self.batchSize_

        #Init dataset epoch.
        self.trainDS_.start_epoch()

        #Process each batch.
        accumAccuracy = 0.0
        accumLoss = 0.0
        accumCounter = 1.0
        for curBatch in range(numBatchesTrain):

            #Get the starting time.
            startDataProcess = current_milli_time()

            #Get the batch data.
            protBatch, features, labels = self.trainDS_.get_next_batch(
                self.batchSize_, self.augment_)

            #Create the dictionary for tensorflow.
            curDict = {}
            if self.balance_:
                curDict[self.lossWeights_] = self.trainDS_.functWeightsLog_
            self.model_.associate_inputs_to_ph(curDict, protBatch, features,
                                               labels, True)

            #Get the end time of the pre-process.
            endDataProcess = current_milli_time()

            #Execute a training step.
            curAccuracy, curLoss, wl2Loss, _ = self.sess_.run([
                self.accuracy_, self.loss_, self.trainer_.weightLoss_,
                self.trainer_.trainOps_
            ], curDict)

            #Get the end time of the computation.
            endComputation = current_milli_time()

            #Accumulate the accuracy and loss.
            accumAccuracy += (curAccuracy - accumAccuracy) / accumCounter
            accumLoss += (curLoss - accumLoss) / accumCounter
            accumCounter += 1.0

            #Visualize progress.
            if curBatch % 10 == 0 and curBatch > 0:
                visualize_progress(
                    curBatch,
                    numBatchesTrain,
                    "Loss: %.6f (%.1f) | Accuracy: %.4f | (Data: %.2f ms | TF: %.2f ms) "
                    % (accumLoss, wl2Loss, accumAccuracy, endDataProcess -
                       startDataProcess, endComputation - endDataProcess),
                    pSameLine=True)
        print()

        #Write the summary.
        trainSumm = self.sess_.run(self.trainingSummary_, {
            self.accuracyPH_: accumAccuracy,
            self.lossPH_: accumLoss
        })
        self.summaryWriter_.add_summary(trainSumm, pNumEpoch)

    def __test_one_epoch__(self, pNumEpoch):
        """Private method to test one epoch.

        Args:
            pNumEpoch (int): Current epoch number.
        """

        #Calculate num batches.
        numBatchesTest = self.testDS_.get_num_proteins() // self.batchSize_

        #Init dataset epoch.
        self.testDS_.start_epoch()

        #Test the model.
        accumTestLoss = 0.0
        accumTestAccuracy = 0.0
        accuracyCats = np.array(
            [0.0 for i in range(len(self.testDS_.functions_))])
        numObjCats = np.array(
            [0.0 for i in range(len(self.testDS_.functions_))])
        for curBatch in range(numBatchesTest):

            #Get the batch data.
            protBatch, features, labels = self.testDS_.get_next_batch(
                self.batchSize_)

            #Create the dictionary for tensorflow.
            curDict = {}
            if self.balance_:
                curDict[self.lossWeights_] = self.trainDS_.functWeightsLog_
            self.model_.associate_inputs_to_ph(curDict, protBatch, features,
                                               labels, False)

            #Execute a test step.
            curAccuracy, curLoss = self.sess_.run(
                [self.accuracyTest_, self.loss_], curDict)

            #Accumulate accuracy.
            for curModel in range(self.batchSize_):
                curLabel = np.argmax(labels[curModel])
                accuracyCats[curLabel] += curAccuracy[curModel]
                numObjCats[curLabel] += 1.0
                accumTestAccuracy += curAccuracy[curModel]
            accumTestLoss += curLoss

            if curBatch % 10 == 0 and curBatch > 0:
                visualize_progress(curBatch, numBatchesTest, pSameLine=True)

        #Print the result of the test.
        for i in range(len(self.testDS_.functions_)):
            accuracyCats[i] = accuracyCats[i] / numObjCats[i]

        print("")

        numTestedProteins = numBatchesTest * self.batchSize_
        totalAccuracy = accumTestAccuracy / float(numTestedProteins)
        totalClassAccuracy = np.mean(accuracyCats)
        totalLoss = accumTestLoss / float(numBatchesTest)
        print("End test:")
        print("Accuracy: %.4f [%.4f]" % (totalAccuracy, self.bestAccuracy_))
        print("Mean Class Accuracy: %.4f [%.4f]" %
              (totalClassAccuracy, self.bestClassAccuracy_))
        print("Loss: %.6f" % (totalLoss))

        #Write the summary.
        testSumm = self.sess_.run(self.testSummary_, {
            self.accuracyPH_: totalAccuracy,
            self.lossPH_: totalLoss
        })
        self.summaryWriter_.add_summary(testSumm, pNumEpoch)

        #Save the model.
        self.saver_.save(self.sess_, self.logFolder_ + "/model.ckpt")

        if totalAccuracy > self.bestAccuracy_:
            self.bestAccuracy_ = totalAccuracy
            self.saver_.save(self.sess_,
                             self.logFolder_ + "/best.ckpt",
                             global_step=self.epochStep_)

        if totalClassAccuracy > self.bestClassAccuracy_:
            self.bestClassAccuracy_ = totalClassAccuracy
            self.saver_.save(self.sess_,
                             self.logFolder_ + "/bestxclass.ckpt",
                             global_step=self.epochStep_)