def generateNetwork(configIniName):

    myParserConfigIni = parserConfigIni()

    myParserConfigIni.readConfigIniFile(configIniName,0)
    print (" **********************  Starting creation model **********************")
    print (" ------------------------ General ------------------------ ")
    print (" - Network name: {}".format(myParserConfigIni.networkName))
    print (" - Folder to save the outputs: {}".format(myParserConfigIni.folderName))
    print (" ------------------------ CNN Architecture ------------------------  ")
    print (" - Number of classes: {}".format(myParserConfigIni.n_classes))
    print (" - Layers: {}".format(myParserConfigIni.layers))
    print (" - Kernel sizes: {}".format(myParserConfigIni.kernels))

    print (" - Intermediate connected CNN layers: {}".format(myParserConfigIni.intermediate_ConnectedLayers))
   
    print (" - Pooling: {}".format(myParserConfigIni.pooling_scales))
    print (" - Dropout: {}".format(myParserConfigIni.dropout_Rates))
    
    def Linear():
        print (" --- Activation function: Linear")
 
    def ReLU():
        print (" --- Activation function: ReLU")
 
    def PReLU():
        print (" --- Activation function: PReLU")

    def LeakyReLU():
        print (" --- Activation function: Leaky ReLU")
                  
    printActivationFunction = {0 : Linear,
                               1 : ReLU,
                               2 : PReLU,
                               3 : LeakyReLU}

    printActivationFunction[myParserConfigIni.activationType]()
        
    def Random(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): Random")
 
    def Delving(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): Delving")
 
    def PreTrained(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): PreTrained")
        
    printweight_Initialization_CNN = {0 : Random,
                                      1 : Delving,
                                      2 : PreTrained}
                               
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_CNN]('CNN')
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_FCN]('FCN')

    print (" ------------------------ Training Parameters ------------------------  ")
    if len(myParserConfigIni.learning_rate) == 1:
        print (" - Learning rate: {}".format(myParserConfigIni.learning_rate))
    else:
        for i in range(len(myParserConfigIni.learning_rate)):
            print (" - Learning rate at layer {} : {} ".format(str(i+1),myParserConfigIni.learning_rate[i]))
    
    print (" - Batch size: {}".format(myParserConfigIni.batch_size))

    if myParserConfigIni.applyBatchNorm:
        print (" - Apply batch normalization in {} epochs".format(myParserConfigIni.BatchNormEpochs))
        
    print (" ------------------------ Size of samples ------------------------  ")
    print (" - Training: {}".format(myParserConfigIni.sampleSize_Train))
    print (" - Testing: {}".format(myParserConfigIni.sampleSize_Test))

    # --------------- Create my LiviaNet3D object  --------------- 
    myLiviaNet3D = LiviaNet3D()
    
    # --------------- Create the whole architecture (Conv layers + fully connected layers + classification layer)  --------------- 
    myLiviaNet3D.createNetwork(myParserConfigIni.networkName,
                               myParserConfigIni.folderName,
                               myParserConfigIni.layers,
                               myParserConfigIni.kernels,
                               myParserConfigIni.intermediate_ConnectedLayers,
                               myParserConfigIni.n_classes,
                               myParserConfigIni.sampleSize_Train,
                               myParserConfigIni.sampleSize_Test,
                               myParserConfigIni.batch_size,
                               myParserConfigIni.applyBatchNorm,
                               myParserConfigIni.BatchNormEpochs,
                               myParserConfigIni.activationType,
                               myParserConfigIni.dropout_Rates,
                               myParserConfigIni.pooling_scales,
                               myParserConfigIni.weight_Initialization_CNN,
                               myParserConfigIni.weight_Initialization_FCN,
                               myParserConfigIni.weightsFolderName,
                               myParserConfigIni.weightsTrainedIdx,
                               myParserConfigIni.tempSoftMax
                               )
                               # TODO: Specify also the weights if pre-trained
                               
                          
    #  ---------------  Initialize all the training parameters  --------------- 
    myLiviaNet3D.initTrainingParameters(myParserConfigIni.costFunction,
                                        myParserConfigIni.L1_reg_C,
                                        myParserConfigIni.L2_reg_C,
                                        myParserConfigIni.learning_rate,
                                        myParserConfigIni.momentumType,
                                        myParserConfigIni.momentumValue,
                                        myParserConfigIni.momentumNormalized,
                                        myParserConfigIni.optimizerType,
                                        myParserConfigIni.rho_RMSProp,
                                        myParserConfigIni.epsilon_RMSProp
                                        )
   
    # ---------------  Compile the functions (Training/Validation/Testing) --------------- 
    myLiviaNet3D.compileTheanoFunctions()

    #  --------------- Save the model --------------- 
    # Generate folders to store the model
    BASE_DIR  = os.getcwd()
    path_Temp = os.path.join(BASE_DIR,'outputFiles')
    # For the networks
    netFolderName  = os.path.join(path_Temp,myParserConfigIni.folderName)
    netFolderName  = os.path.join(netFolderName,'Networks')
   
    # For the predictions
    predlFolderName    = os.path.join(path_Temp,myParserConfigIni.folderName)
    predlFolderName    = os.path.join(predlFolderName,'Pred')
    predValFolderName  = os.path.join(predlFolderName,'Validation')
    predTestFolderName = os.path.join(predlFolderName,'Testing')
   
    makeFolder(netFolderName, "Networks")
    makeFolder(predValFolderName, "to store predictions (Validation)")
    makeFolder(predTestFolderName, "to store predictions (Testing)")

    modelFileName = netFolderName + "/" + myParserConfigIni.networkName + "_Epoch0"
    dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
    
    strFinal =  " Network model saved in " + netFolderName + " as " + myParserConfigIni.networkName + "_Epoch0"
    print  (strFinal)
    
    return modelFileName
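
A minimal driver sketch for this example, assuming the usual pattern of building the network first and then handing the saved model to startTraining (defined further down in this listing); the config path is a placeholder, not taken from the code above:

# Hypothetical driver: build the network from a config file, then train it.
# The .ini path below is an assumption; point it at your own configuration file.
if __name__ == "__main__":
    configIniName = "./config/Network_Creation_Training.ini"  # placeholder path
    networkModelName = generateNetwork(configIniName)  # saves "<networkName>_Epoch0" and returns its path
    startTraining(networkModelName, configIniName)     # resumes training from that saved model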
Example 2
def generateNetwork(configIniName):

    myParserConfigIni = parserConfigIni()

    myParserConfigIni.readConfigIniFile(configIniName,0)
    print " **********************  Starting creation model **********************"
    print " ------------------------ General ------------------------ "
    print " - Network name: {}".format(myParserConfigIni.networkName)
    print " - Folder to save the outputs: {}".format(myParserConfigIni.folderName)
    print " ------------------------ CNN Architecture ------------------------  "
    print " - Number of classes: {}".format(myParserConfigIni.n_classes)
    print " - Layers: {}".format(myParserConfigIni.layers)
    print " - Kernel sizes: {}".format(myParserConfigIni.kernels)

    print " - Intermediate connected CNN layers: {}".format(myParserConfigIni.intermediate_ConnectedLayers)
   
    print " - Pooling: {}".format(myParserConfigIni.pooling_scales)
    print " - Dropout: {}".format(myParserConfigIni.dropout_Rates)
    
    def Linear():
        print " --- Activation function: Linear"
 
    def ReLU():
        print " --- Activation function: ReLU"
 
    def PReLU():
        print " --- Activation function: PReLU"

    def LeakyReLU():
        print " --- Activation function: Leaky ReLU"
                  
    printActivationFunction = {0 : Linear,
                               1 : ReLU,
                               2 : PReLU,
                               3 : LeakyReLU}

    printActivationFunction[myParserConfigIni.activationType]()
        
    def Random(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Random"
 
    def Delving(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Delving"
 
    def PreTrained(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): PreTrained"
        
    printweight_Initialization_CNN = {0 : Random,
                                      1 : Delving,
                                      2 : PreTrained}
                               
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_CNN]('CNN')
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_FCN]('FCN')

    print " ------------------------ Training Parameters ------------------------  "
    if len(myParserConfigIni.learning_rate) == 1:
        print " - Learning rate: {}".format(myParserConfigIni.learning_rate)
    else:
        for i in xrange(len(myParserConfigIni.learning_rate)):
            print " - Learning rate at layer {} : {} ".format(str(i+1),myParserConfigIni.learning_rate[i])
    
    print " - Batch size: {}".format(myParserConfigIni.batch_size)

    if myParserConfigIni.applyBatchNorm:
        print " - Apply batch normalization in {} epochs".format(myParserConfigIni.BatchNormEpochs)
        
    print " ------------------------ Size of samples ------------------------  "
    print " - Training: {}".format(myParserConfigIni.sampleSize_Train)
    print " - Testing: {}".format(myParserConfigIni.sampleSize_Test)

    # --------------- Create my LiviaSemiDenseNet3D object  --------------- 
    myLiviaSemiDenseNet3D = LiviaSemiDenseNet3D()
    
    # --------------- Create the whole architecture (Conv layers + fully connected layers + classification layer)  --------------- 
    myLiviaSemiDenseNet3D.createNetwork(myParserConfigIni.networkName,
                               myParserConfigIni.folderName,
                               myParserConfigIni.layers,
                               myParserConfigIni.kernels,
                               myParserConfigIni.intermediate_ConnectedLayers,
                               myParserConfigIni.n_classes,
                               myParserConfigIni.sampleSize_Train,
                               myParserConfigIni.sampleSize_Test,
                               myParserConfigIni.batch_size,
                               myParserConfigIni.applyBatchNorm,
                               myParserConfigIni.BatchNormEpochs,
                               myParserConfigIni.activationType,
                               myParserConfigIni.dropout_Rates,
                               myParserConfigIni.pooling_scales,
                               myParserConfigIni.weight_Initialization_CNN,
                               myParserConfigIni.weight_Initialization_FCN,
                               myParserConfigIni.weightsFolderName,
                               myParserConfigIni.weightsTrainedIdx,
                               myParserConfigIni.tempSoftMax
                               )
                               # TODO: Specify also the weights if pre-trained
                               
                          
    #  ---------------  Initialize all the training parameters  --------------- 
    myLiviaSemiDenseNet3D.initTrainingParameters(myParserConfigIni.costFunction,
                                        myParserConfigIni.L1_reg_C,
                                        myParserConfigIni.L2_reg_C,
                                        myParserConfigIni.learning_rate,
                                        myParserConfigIni.momentumType,
                                        myParserConfigIni.momentumValue,
                                        myParserConfigIni.momentumNormalized,
                                        myParserConfigIni.optimizerType,
                                        myParserConfigIni.rho_RMSProp,
                                        myParserConfigIni.epsilon_RMSProp
                                        )
   
    # ---------------  Compile the functions (Training/Validation/Testing) --------------- 
    myLiviaSemiDenseNet3D.compileTheanoFunctions()

    #  --------------- Save the model --------------- 
    # Generate folders to store the model
    BASE_DIR  = os.getcwd()
    path_Temp = os.path.join(BASE_DIR,'outputFiles')
    # For the networks
    netFolderName  = os.path.join(path_Temp,myParserConfigIni.folderName)
    netFolderName  = os.path.join(netFolderName,'Networks')
   
    # For the predictions
    predlFolderName    = os.path.join(path_Temp,myParserConfigIni.folderName)
    predlFolderName    = os.path.join(predlFolderName,'Pred')
    predValFolderName  = os.path.join(predlFolderName,'Validation')
    predTestFolderName = os.path.join(predlFolderName,'Testing')
   
    makeFolder(netFolderName, "Networks")
    makeFolder(predValFolderName, "to store predictions (Validation)")
    makeFolder(predTestFolderName, "to store predictions (Testing)")

    modelFileName = netFolderName + "/" + myParserConfigIni.networkName + "_Epoch0"
    dump_model_to_gzip_file(myLiviaSemiDenseNet3D, modelFileName)
    
    strFinal =  " Network model saved in " + netFolderName + " as " + myParserConfigIni.networkName + "_Epoch0"
    print  strFinal
    
    return modelFileName
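
For reference, the save block shared by both generateNetwork variants above writes everything under outputFiles/ in the current working directory. The small sketch below only reconstructs those paths with os.path; 'myNet' stands in for myParserConfigIni.folderName:

import os

# Output layout produced by the save block ('myNet' is a placeholder folder name).
base = os.path.join(os.getcwd(), 'outputFiles', 'myNet')
print(os.path.join(base, 'Networks'))            # gzipped models: <networkName>_Epoch0, _Epoch1, ...
print(os.path.join(base, 'Pred', 'Validation'))  # predictions written during validation
print(os.path.join(base, 'Pred', 'Testing'))     # predictions written at test time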
Example 3
def startTraining(networkModelName,configIniName):
    print (" ************************************************  STARTING TRAINING **************************************************")
    print (" **********************  Starting training model (Reading parameters) **********************")

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print ("-------- Reading Images names used in training/validation -------------")
##-----##
    # from sklearn.model_selection import KFold
    # import numpy as np
    # y1 = myParserConfigIni.indexesForTraining
    # #x1 = myParserConfigIni.indexesForValidation
    # kf = KFold(n_splits= 5)
    #
    # for train_index, test_index in kf.split(y1):
    #     print("TRAIN:", train_index, "TEST:", test_index)
    #     y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import LeavePOut
    # lpo = LeavePOut(p=5)
    # y1 = myParserConfigIni.indexesForTraining
    # for train, test in lpo.split(y1):
    #     y, x = np.array(y1)[train], np.array(y1)[test]
##-----train##
    # Leave-one-out over the training indexes. Note that only the last split of
    # the loop is kept in (y, x) and used below to build the image lists.
    from sklearn.model_selection import LeaveOneOut  # sklearn.cross_validation was removed in newer scikit-learn
    loo = LeaveOneOut()
    y1 = myParserConfigIni.indexesForTraining
    x1 = myParserConfigIni.indexesForValidation
    for train_index, test_index in loo.split(y1):
        print("TRAIN:", train_index, "TEST:", test_index)
        y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import train_test_split
    #     X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)

    # -- Get list of images used for training -- #

    (imageNames_Train, names_Train)          = getImagesSet(myParserConfigIni.imagesFolder,y)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth
    (roiNames_Train, roi_names_Train)        = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI

    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)          = getImagesSet(myParserConfigIni.imagesFolder,x)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth
    (roiNames_Val, roi_names_Val)        = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI

    # Print names
    print (" ================== Images for training ================")
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Train[i], gt_names_Train[i] ))
    print (" ================== Images for validation ================")
    for i in range(0,len(names_Val)):
        if len(roi_names_Val) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Val[i], gt_names_Val[i]))
    print (" ===============================================================")
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print (" ... Network architecture successfully loaded....")

    # Assign parameters to loaded Net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) // myLiviaNet3D.batch_Size  # number of full batches (remainder is skipped)
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Include the epoch cost to the main training cost and update current mean 
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#

        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ At this point, training for epoch n is done ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print  (strFinal)

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
Example 4
def startTraining(networkModelName,configIniName):
    print " ************************************************  STARTING TRAINING **************************************************"
    print " **********************  Starting training model (Reading parameters) **********************"

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print "-------- Reading Images names used in training/validation -------------"

    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train)                = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForTraining)  # Images
    (imageNames_Train_Bottom, names_Train_Bottom)  = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForTraining)  # Images
    (groundTruthNames_Train, gt_names_Train)       = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForTraining) # Ground truth
    (roiNames_Train, roi_names_Train)              = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForTraining) # ROI
    
    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)               = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForValidation)  # Images
    (imageNames_Val_Bottom, names_Val_Bottom) = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForValidation)  # Images (bottom pathway)
    (groundTruthNames_Val, gt_names_Val)      = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForValidation) # Ground truth
    (roiNames_Val, roi_names_Val)             = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForValidation) # ROI

    # Print names
    print " ================== Images for training ================"
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i] ))
    print " ================== Images for validation ================"
    for i in range(0,len(names_Val)):
        if len(roi_names_Val) > 0:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], names_Val_Bottom[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  ".format(i,names_Val[i],  names_Val_Bottom[i], gt_names_Val[i]))
    print " ==============================================================="
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Assign parameters to loaded Net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            imagesSamplesAll_Bottom,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                imageNames_Train_Bottom,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) // myLiviaNet3D.batch_Size  # number of full batches (remainder is skipped)
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_x_Bottom.set_value(imagesSamplesAll_Bottom, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_x_Bottom.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Include the epoch cost to the main training cost and update current mean 
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#
        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          imageNames_Val_Bottom,
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ At this point, training for epoch n is done ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print  strFinal

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")