Example #1
def startTesting(FCNname,configIniName) :

    #padInputImagesBool = True # from config ini
    print (" ******************************************  STARTING SEGMENTATION ******************************************")

    print (" **********************  Starting segmentation **********************")
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,2)
    

    print (" -------- Images to segment -------------")

    print (" -------- Reading Images names for segmentation -------------")
    
    # -- Get list of images used for testing -- #
    (imageNames_Test, names_Test) = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesToSegment) # Ground truth

    print (" ================== Images for training ================")
    for i in range(0,len(names_Test)):
        print(" Image({}): {}  |  GT: {}  ".format(i,names_Test[i], gt_names_Test[i] ))

    folderName            = myParserConfigIni.folderName
    batch_size            = myParserConfigIni.batch_size
    sampleSize_Test       = myParserConfigIni.Patch_Size_Test
    imageType             = myParserConfigIni.imageTypes
    strideValues          = myParserConfigIni.strideValues
    numberOfClass         = myParserConfigIni.n_classes
    numberImagesToSegment = len(imageNames_Test)

    # --------------- Load my FCN object  --------------- 
    print (" ... Loading model from {}".format(FCNname))
    model = torch.load(FCNname)
 
    print (" ... Network architecture successfully loaded....")

    for i_d in range(numberImagesToSegment) :
        print("**********************  Segmenting subject: {} ....total: {}/{}...**********************".format(names_Test[i_d],str(i_d+1),str(numberImagesToSegment)))
        
        segmentVolume(model,
                      folderName,
                      i_d,
                      imageNames_Test,
                      names_Test,
                      groundTruthNames_Test,
                      imageType,
                      sampleSize_Test,
                      strideValues,
                      numberOfClass,
                      batch_size,
                      1 # Validation (0) or testing (1)
                      )      
       
    print(" **************************************************************************************************** ")
def networkTraining(argv):
    # Number of input arguments
    #    1: ConfigIniName
    #    2: TrainingType
    #             0: Create a new model and start training
    #             1: Use an existing model to keep on training (Requires an additional input with model name)
    #    3: (Optional, but required if arg 2 is equal to 1) Network model name
    #       (both call patterns are sketched right after this function)

    # Do some sanity checks

    if len(argv) < 2:
        printUsage(1)
        sys.exit()

    configIniName = argv[0]
    trainingType = argv[1]

    if trainingType == '1' and len(argv) == 2:
        printUsage(2)
        sys.exit()

    if len(argv) > 2:
        networkModelName = argv[2]

    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName, 1)
    numberOfClass = myParserConfigIni.n_classes
    # Creating a new model
    if trainingType == '0':
        print(
            " ******************************************  CREATING NETWORK ******************************************"
        )
        model = FCN(numberOfClass)
        startTraining(model, configIniName, 0)
        print(
            " ******************************************  NETWORK CREATED ******************************************"
        )
    else:
        print(
            " ******************************************  STARTING NETWORK TRAINING ******************************************"
        )
        model = torch.load(networkModelName)
        # Recover the starting epoch from a file name like "FCN_Epoch<N>"
        # (splitting on the last 'h' leaves the digits that follow "Epoch").
        iterN = networkModelName.split('/')[-1]
        iterN = int(iterN.split('h')[-1])
        startTraining(model, configIniName, iterN)
        print(
            " ******************************************  DONE  ******************************************"
        )
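
# A minimal sketch of the two call patterns described in networkTraining's
# comment block (the config path and model path below are hypothetical):
#
#   networkTraining(['./config/Train.ini', '0'])                      # create a new model
#   networkTraining(['./config/Train.ini', '1',
#                    './outputFiles/myNet/Networks/FCN_Epoch5'])      # resume training
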
def generateNetwork(configIniName) :   

    myParserConfigIni = parserConfigIni()

    myParserConfigIni.readConfigIniFile(configIniName,0)
    print (" **********************  Starting creation model **********************")
    print (" ------------------------ General ------------------------ ")
    print (" - Network name: {}".format(myParserConfigIni.networkName))
    print (" - Folder to save the outputs: {}".format(myParserConfigIni.folderName))
    print (" ------------------------ CNN Architecture ------------------------  ")
    print (" - Number of classes: {}".format(myParserConfigIni.n_classes))
    print (" - Layers: {}".format(myParserConfigIni.layers))
    print (" - Kernel sizes: {}".format(myParserConfigIni.kernels))

    print (" - Intermediate connected CNN layers: {}".format(myParserConfigIni.intermediate_ConnectedLayers))
   
    print (" - Pooling: {}".format(myParserConfigIni.pooling_scales))
    print (" - Dropout: {}".format(myParserConfigIni.dropout_Rates))
    
    def Linear():
        print (" --- Activation function: Linear")
 
    def ReLU():
        print (" --- Activation function: ReLU")
 
    def PReLU():
        print (" --- Activation function: PReLU")

    def LeakyReLU():
        print (" --- Activation function: Leaky ReLU")
                  
    printActivationFunction = {0 : Linear,
                               1 : ReLU,
                               2 : PReLU,
                               3 : LeakyReLU}

    printActivationFunction[myParserConfigIni.activationType]()
        
    def Random(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): Random")
 
    def Delving(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): Delving")
 
    def PreTrained(layerType):
        print (" --- Weights initialization (" +layerType+ " Layers): PreTrained")
        
    printweight_Initialization_CNN = {0 : Random,
                                      1 : Delving,
                                      2 : PreTrained}
                               
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_CNN]('CNN')
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_FCN]('FCN')

    print (" ------------------------ Training Parameters ------------------------  ")
    if len(myParserConfigIni.learning_rate) == 1:
        print (" - Learning rate: {}".format(myParserConfigIni.learning_rate))
    else:
        for i in range(len(myParserConfigIni.learning_rate)):
            print (" - Learning rate at layer {} : {} ".format(str(i+1),myParserConfigIni.learning_rate[i]))
    
    print (" - Batch size: {}".format(myParserConfigIni.batch_size))

    if myParserConfigIni.applyBatchNorm == True:
        print (" - Apply batch normalization in {} epochs".format(myParserConfigIni.BatchNormEpochs))
        
    print (" ------------------------ Size of samples ------------------------  ")
    print (" - Training: {}".format(myParserConfigIni.sampleSize_Train))
    print (" - Testing: {}".format(myParserConfigIni.sampleSize_Test))

    # --------------- Create my LiviaNet3D object  --------------- 
    myLiviaNet3D = LiviaNet3D()
    
    # --------------- Create the whole architecture (Conv layers + fully connected layers + classification layer)  --------------- 
    myLiviaNet3D.createNetwork(myParserConfigIni.networkName,
                               myParserConfigIni.folderName,
                               myParserConfigIni.layers,
                               myParserConfigIni.kernels,
                               myParserConfigIni.intermediate_ConnectedLayers,
                               myParserConfigIni.n_classes,
                               myParserConfigIni.sampleSize_Train,
                               myParserConfigIni.sampleSize_Test,
                               myParserConfigIni.batch_size,
                               myParserConfigIni.applyBatchNorm,
                               myParserConfigIni.BatchNormEpochs,
                               myParserConfigIni.activationType,
                               myParserConfigIni.dropout_Rates,
                               myParserConfigIni.pooling_scales,
                               myParserConfigIni.weight_Initialization_CNN,
                               myParserConfigIni.weight_Initialization_FCN,
                               myParserConfigIni.weightsFolderName,
                               myParserConfigIni.weightsTrainedIdx,
                               myParserConfigIni.tempSoftMax
                               )
                               # TODO: Specify also the weights if pre-trained
                               
                          
    #  ---------------  Initialize all the training parameters  --------------- 
    myLiviaNet3D.initTrainingParameters(myParserConfigIni.costFunction,
                                        myParserConfigIni.L1_reg_C,
                                        myParserConfigIni.L2_reg_C,
                                        myParserConfigIni.learning_rate,
                                        myParserConfigIni.momentumType,
                                        myParserConfigIni.momentumValue,
                                        myParserConfigIni.momentumNormalized,
                                        myParserConfigIni.optimizerType,
                                        myParserConfigIni.rho_RMSProp,
                                        myParserConfigIni.epsilon_RMSProp
                                        )
   
    # ---------------  Compile the functions (Training/Validation/Testing) --------------- 
    myLiviaNet3D.compileTheanoFunctions()

    #  --------------- Save the model --------------- 
    # Generate folders to store the model
    BASE_DIR  = os.getcwd()
    path_Temp = os.path.join(BASE_DIR,'outputFiles')
    # For the networks
    netFolderName  = os.path.join(path_Temp,myParserConfigIni.folderName)
    netFolderName  = os.path.join(netFolderName,'Networks')
   
    # For the predictions
    predlFolderName    = os.path.join(path_Temp,myParserConfigIni.folderName)
    predlFolderName    = os.path.join(predlFolderName,'Pred')
    predValFolderName  = os.path.join(predlFolderName,'Validation')
    predTestFolderName = os.path.join(predlFolderName,'Testing')
   
    makeFolder(netFolderName, "Networks")
    makeFolder(predValFolderName, "to store predictions (Validation)")
    makeFolder(predTestFolderName, "to store predictions (Testing)")

    modelFileName = netFolderName + "/" + myParserConfigIni.networkName + "_Epoch0"
    dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
    
    strFinal =  " Network model saved in " + netFolderName + " as " + myParserConfigIni.networkName + "_Epoch0"
    print  (strFinal)
    
    return modelFileName
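
# Sketch of how these pieces are typically chained for the Theano/LiviaNet3D
# variant, assuming one .ini file per stage (all paths below are hypothetical):
#
#   modelFileName = generateNetwork('./config/LiviaNet_Creation.ini')
#   startTraining(modelFileName, './config/LiviaNet_Train.ini')
#   startTesting('./outputFiles/LiviaNet/Networks/LiviaNet_Epoch20',
#                './config/LiviaNet_Segmentation.ini')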
Example #4
def startTraining(networkModelName,configIniName):
    print (" ************************************************  STARTING TRAINING **************************************************")
    print (" **********************  Starting training model (Reading parameters) **********************")

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print ("-------- Reading Images names used in training/validation -------------")
##-----##
    # from sklearn.model_selection import KFold
    # import numpy as np
    # y1 = myParserConfigIni.indexesForTraining
    # #x1 = myParserConfigIni.indexesForValidation
    # kf = KFold(n_splits= 5)
    #
    # for train_index, test_index in kf.split(y1):
    #     print("TRAIN:", train_index, "TEST:", test_index)
    #     y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import LeavePOut
    # lpo = LeavePOut(p=5)
    # y1 = myParserConfigIni.indexesForTraining
    # for train, test in lpo.split(y1):
    #     y, x = np.array(y1)[train], np.array(y1)[test]
##-----train##
    from sklearn.cross_validation import LeaveOneOut  # pre-0.18 API (removed in scikit-learn 0.20)
    loo = LeaveOneOut(4)
    y1 = myParserConfigIni.indexesForTraining
    x1 = myParserConfigIni.indexesForValidation
    # Only the split of the last iteration is kept: y holds the training
    # indexes and x the held-out (validation) indexes used further below.
    for train_index, test_index in loo:
        print("TRAIN:", train_index, "TEST:", test_index)
        y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import train_test_split
    #     X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
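##-----##
    # Equivalent leave-one-out split with the post-0.18 API (the
    # sklearn.cross_validation module used above was removed in scikit-learn 0.20);
    # kept commented out like the other variants:
    # from sklearn.model_selection import LeaveOneOut
    # loo = LeaveOneOut()
    # for train_index, test_index in loo.split(y1):
    #     print("TRAIN:", train_index, "TEST:", test_index)
    #     y, x = np.array(y1)[train_index], np.array(y1)[test_index]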

    # -- Get list of images used for training -- #

    (imageNames_Train, names_Train)          = getImagesSet(myParserConfigIni.imagesFolder,y)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth
    (roiNames_Train, roi_names_Train)        = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI

    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)          = getImagesSet(myParserConfigIni.imagesFolder,x)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth
    (roiNames_Val, roi_names_Val)        = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI

    # Print names
    print (" ================== Images for training ================")
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Train[i], gt_names_Train[i] ))
    print (" ================== Images for validation ================")
    for i in range(0,len(names_Val)):
        if len(roi_names_Train) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Val[i], gt_names_Val[i]))
    print (" ===============================================================")
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print (" ... Network architecture successfully loaded....")

    # Assign parameters to the loaded net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size 
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Append the epoch cost to the overall training cost and update the running mean
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#

        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
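
        # Example of this schedule (assuming firstEpochChangeLR = 10 and
        # frequencyChangeLR = 5): the learning rate is halved at epoch 10 and
        # then halved again at epochs 15, 20, 25, ... for as long as training runs.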
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ At this point, training for epoch n is done ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print  (strFinal)

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
def startTesting(networkModelName, configIniName):

    padInputImagesBool = True  # from config ini
    print " ******************************************  STARTING SEGMENTATION ******************************************"

    print " **********************  Starting segmentation **********************"
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName, 2)

    print " -------- Images to segment -------------"

    print " -------- Reading Images names for segmentation -------------"

    # -- Get list of images used for testing -- #
    (imageNames_Test,
     names_Test) = getImagesSet(myParserConfigIni.imagesFolder,
                                myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesToSegment)  # Ground truth
    (roiNames_Test,
     roi_names_Test) = getImagesSet(myParserConfigIni.ROIFolder,
                                    myParserConfigIni.indexesToSegment)  # ROI

    # --------------- Load my LiviaNet3D object  ---------------
    print(" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Get info from the network model
    networkName = myLiviaNet3D.networkName
    folderName = myLiviaNet3D.folderName
    n_classes = myLiviaNet3D.n_classes
    sampleSize_Test = myLiviaNet3D.sampleSize_Test
    receptiveField = myLiviaNet3D.receptiveField
    outputShape = myLiviaNet3D.lastLayer.outputShapeTest[2:]
    batch_Size = myLiviaNet3D.batch_Size
    padInputImagesBool = myParserConfigIni.applyPadding
    imageType = myParserConfigIni.imageTypes
    numberImagesToSegment = len(imageNames_Test)

    strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]

    # Run over the images to segment
    for i_d in xrange(numberImagesToSegment):
        print(
            "**********************  Segmenting subject: {} ....total: {}/{}...**********************"
            .format(names_Test[i_d], str(i_d + 1), str(numberImagesToSegment)))

        segmentVolume(
            myLiviaNet3D,
            i_d,
            imageNames_Test,  # Full path
            names_Test,  # Only image name
            groundTruthNames_Test,
            roiNames_Test,
            imageType,
            padInputImagesBool,
            receptiveField,
            sampleSize_Test,
            strideValues,
            batch_Size,
            1  # Validation (0) or testing (1)
        )

    print(
        " **************************************************************************************************** "
    )
Example #6
def generateNetwork(configIniName) :   

    myParserConfigIni = parserConfigIni()

    myParserConfigIni.readConfigIniFile(configIniName,0)
    print " **********************  Starting creation model **********************"
    print " ------------------------ General ------------------------ "
    print " - Network name: {}".format(myParserConfigIni.networkName)
    print " - Folder to save the outputs: {}".format(myParserConfigIni.folderName)
    print " ------------------------ CNN Architecture ------------------------  "
    print " - Number of classes: {}".format(myParserConfigIni.n_classes)
    print " - Layers: {}".format(myParserConfigIni.layers)
    print " - Kernel sizes: {}".format(myParserConfigIni.kernels)

    print " - Intermediate connected CNN layers: {}".format(myParserConfigIni.intermediate_ConnectedLayers)
   
    print " - Pooling: {}".format(myParserConfigIni.pooling_scales)
    print " - Dropout: {}".format(myParserConfigIni.dropout_Rates)
    
    def Linear():
        print " --- Activation function: Linear"
 
    def ReLU():
        print " --- Activation function: ReLU"
 
    def PReLU():
        print " --- Activation function: PReLU"

    def LeakyReLU():
        print " --- Activation function: Leaky ReLU"
                  
    printActivationFunction = {0 : Linear,
                               1 : ReLU,
                               2 : PReLU,
                               3 : LeakyReLU}

    printActivationFunction[myParserConfigIni.activationType]()
        
    def Random(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Random"
 
    def Delving(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): Delving"
 
    def PreTrained(layerType):
        print " --- Weights initialization (" +layerType+ " Layers): PreTrained"
        
    printweight_Initialization_CNN = {0 : Random,
                                      1 : Delving,
                                      2 : PreTrained}
                               
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_CNN]('CNN')
    printweight_Initialization_CNN[myParserConfigIni.weight_Initialization_FCN]('FCN')

    print " ------------------------ Training Parameters ------------------------  "
    if len(myParserConfigIni.learning_rate) == 1:
        print " - Learning rate: {}".format(myParserConfigIni.learning_rate)
    else:
        for i in xrange(len(myParserConfigIni.learning_rate)):
            print " - Learning rate at layer {} : {} ".format(str(i+1),myParserConfigIni.learning_rate[i])
    
    print " - Batch size: {}".format(myParserConfigIni.batch_size)

    if myParserConfigIni.applyBatchNorm == True:
        print " - Apply batch normalization in {} epochs".format(myParserConfigIni.BatchNormEpochs)
        
    print " ------------------------ Size of samples ------------------------  "
    print " - Training: {}".format(myParserConfigIni.sampleSize_Train)
    print " - Testing: {}".format(myParserConfigIni.sampleSize_Test)

    # --------------- Create my LiviaSemiDenseNet3D object  --------------- 
    myLiviaSemiDenseNet3D = LiviaSemiDenseNet3D()
    
    # --------------- Create the whole architecture (Conv layers + fully connected layers + classification layer)  --------------- 
    myLiviaSemiDenseNet3D.createNetwork(myParserConfigIni.networkName,
                               myParserConfigIni.folderName,
                               myParserConfigIni.layers,
                               myParserConfigIni.kernels,
                               myParserConfigIni.intermediate_ConnectedLayers,
                               myParserConfigIni.n_classes,
                               myParserConfigIni.sampleSize_Train,
                               myParserConfigIni.sampleSize_Test,
                               myParserConfigIni.batch_size,
                               myParserConfigIni.applyBatchNorm,
                               myParserConfigIni.BatchNormEpochs,
                               myParserConfigIni.activationType,
                               myParserConfigIni.dropout_Rates,
                               myParserConfigIni.pooling_scales,
                               myParserConfigIni.weight_Initialization_CNN,
                               myParserConfigIni.weight_Initialization_FCN,
                               myParserConfigIni.weightsFolderName,
                               myParserConfigIni.weightsTrainedIdx,
                               myParserConfigIni.tempSoftMax
                               )
                               # TODO: Specify also the weights if pre-trained
                               
                          
    #  ---------------  Initialize all the training parameters  --------------- 
    myLiviaSemiDenseNet3D.initTrainingParameters(myParserConfigIni.costFunction,
                                        myParserConfigIni.L1_reg_C,
                                        myParserConfigIni.L2_reg_C,
                                        myParserConfigIni.learning_rate,
                                        myParserConfigIni.momentumType,
                                        myParserConfigIni.momentumValue,
                                        myParserConfigIni.momentumNormalized,
                                        myParserConfigIni.optimizerType,
                                        myParserConfigIni.rho_RMSProp,
                                        myParserConfigIni.epsilon_RMSProp
                                        )
   
    # ---------------  Compile the functions (Training/Validation/Testing) --------------- 
    myLiviaSemiDenseNet3D.compileTheanoFunctions()

    #  --------------- Save the model --------------- 
    # Generate folders to store the model
    BASE_DIR  = os.getcwd()
    path_Temp = os.path.join(BASE_DIR,'outputFiles')
    # For the networks
    netFolderName  = os.path.join(path_Temp,myParserConfigIni.folderName)
    netFolderName  = os.path.join(netFolderName,'Networks')
   
    # For the predictions
    predlFolderName    = os.path.join(path_Temp,myParserConfigIni.folderName)
    predlFolderName    = os.path.join(predlFolderName,'Pred')
    predValFolderName  = os.path.join(predlFolderName,'Validation')
    predTestFolderName = os.path.join(predlFolderName,'Testing')
   
    makeFolder(netFolderName, "Networks")
    makeFolder(predValFolderName, "to store predictions (Validation)")
    makeFolder(predTestFolderName, "to store predictions (Testing)")

    modelFileName = netFolderName + "/" + myParserConfigIni.networkName + "_Epoch0"
    dump_model_to_gzip_file(myLiviaSemiDenseNet3D, modelFileName)
    
    strFinal =  " Network model saved in " + netFolderName + " as " + myParserConfigIni.networkName + "_Epoch0"
    print  strFinal
    
    return modelFileName
Example #7
def startTraining(networkModelName,configIniName):
    print " ************************************************  STARTING TRAINING **************************************************"
    print " **********************  Starting training model (Reading parameters) **********************"

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print "-------- Reading Images names used in training/validation -------------"

    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train)                = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForTraining)  # Images
    (imageNames_Train_Bottom, names_Train_Bottom)  = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForTraining)  # Images
    (groundTruthNames_Train, gt_names_Train)       = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForTraining) # Ground truth
    (roiNames_Train, roi_names_Train)              = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForTraining) # ROI
    
    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)               = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForValidation)  # Images
    (imageNames_Val_Bottom, names_Val_Bottom) = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForValidation)  # Bottom images
    (groundTruthNames_Val, gt_names_Val)      = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForValidation) # Ground truth
    (roiNames_Val, roi_names_Val)             = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForValidation) # ROI

    # Print names
    print " ================== Images for training ================"
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i] ))
    print " ================== Images for validation ================"
    for i in range(0,len(names_Val)):
        if len(roi_names_Train) > 0:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], names_Val_Bottom[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  ".format(i,names_Val[i],  names_Val_Bottom[i], gt_names_Val[i]))
    print " ==============================================================="
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Assign parameters to the loaded net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            imagesSamplesAll_Bottom,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                imageNames_Train_Bottom,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size 
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_x_Bottom.set_value(imagesSamplesAll_Bottom, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Append the epoch cost to the overall training cost and update the running mean
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#
        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          imageNames_Val_Bottom,
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ At this point, training for epoch n is done ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print  strFinal

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
Example #8
def startTraining(model, configIniName, inIter):
    print(
        " ************************************************  STARTING TRAINING **************************************************"
    )
    print(
        " **********************  Starting training model (Reading parameters) **********************"
    )

    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName, 1)

    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print(" --- Do training in {} epochs with {} subEpochs each...".format(
        myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print(
        "-------- Reading Images names used in training/validation -------------"
    )

    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train) = getImagesSet(
        myParserConfigIni.imagesFolder,
        myParserConfigIni.indexesForTraining)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesForTraining)  # Ground truth

    # Print names
    print(" ================== Images for training ================")
    for i in range(0, len(names_Train)):
        print(" Image({}): {}  |  GT: {}  ".format(i, names_Train[i],
                                                   gt_names_Train[i]))

    numberOfEpochs = myParserConfigIni.numberOfEpochs
    numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
    numberOfClass = myParserConfigIni.n_classes
    sampleSize_Train = myParserConfigIni.Patch_Size_Train
    batch_size = myParserConfigIni.batch_size
    folderName = myParserConfigIni.folderName
    timeForValidation = myParserConfigIni.timeForValidation

    # -- Get list of images used for validation -- #
    myParserConfigIni.readConfigIniFile(configIniName, 2)
    sampleSize_Test = myParserConfigIni.Patch_Size_Train
    strideValues = myParserConfigIni.strideValues

    (imageNames_Val,
     names_Val) = getImagesSet(myParserConfigIni.imagesFolder,
                               myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesToSegment)  # Ground truth

    # Print names
    print(" ================== Images for validation ================")
    for i in range(0, len(names_Val)):
        print(" Image({}): {}  |  GT: {}  ".format(i, names_Val[i],
                                                   gt_names_Val[i]))

    print(" ===============================================================")

    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-5)
    # Wrap the model for multi-GPU training when CUDA is available; fall back
    # to the plain model otherwise so that 'net' is always defined.
    net = model
    if torch.cuda.is_available():
        print('============================')
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = torch.nn.DataParallel(model)
        net.cuda()

    AvDice_Val = 0

    for e_i in range(numberOfEpochs):
        # Recover last trained epoch
        print(" ============== EPOCH: {}/{} =================".format(
            e_i, numberOfEpochs))
        costsOfEpoch = []
        for subE_i in range(numberOfSubEpochs):
            #epoch_nr = subE_i+1
            print(" --- SubEPOCH: {}/{}".format(subE_i, numberOfSubEpochs))
            # Get all the samples that will be used in this sub-epoch

            [imagesSamplesAll, gt_samplesAll
             ] = getSamplesSubepoch(numberOfSamplesSupEpoch, imageNames_Train,
                                    groundTruthNames_Train, imageType,
                                    sampleSize_Train)
            imagesSamplesAll = np.array(imagesSamplesAll)
            gt_samplesAll = np.array(gt_samplesAll)
            train_data = MyDataset(imagesSamplesAll,
                                   gt_samplesAll,
                                   transform=transforms.ToTensor())
            train_loader = DataLoader(train_data,
                                      batch_size,
                                      shuffle=True,
                                      num_workers=8)

            train(net, train_loader, optimizer, numberOfClass, subE_i)

        Dice = []
        # Validation (and the early-stopping check below) only starts once the
        # total number of trained epochs exceeds timeForValidation from the config.
        if e_i + inIter > timeForValidation:
            numberImagesToSegment = len(imageNames_Val)
            print(
                " ********************** Starting validation **********************"
            )
            # Run over the images to segment

            for i_d in range(numberImagesToSegment):
                print(
                    "**********************  Segmenting subject: {} ....total: {}/{}...**********************"
                    .format(names_Val[i_d], str(i_d + 1),
                            str(numberImagesToSegment)))

                [everyROIDice, tmpDice] = segmentVolume(
                    net,
                    folderName,
                    i_d,
                    imageNames_Val,
                    names_Val,
                    groundTruthNames_Val,
                    imageType,
                    sampleSize_Test,
                    strideValues,
                    numberOfClass,
                    batch_size,
                    0  # Validation (0) or testing (1)
                )

                Dice.append(tmpDice)
            print(
                " ********************** Validation DONE ********************** "
            )

            # Early stopping on the mean validation Dice: keep training while it
            # improves, stop otherwise (note that 'continue' also skips the
            # model-saving step below).
            if sum(Dice) / len(Dice) >= AvDice_Val:
                AvDice_Val = sum(Dice) / len(Dice)
                continue
            else:
                break
        #  --------------- Save the model ---------------
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR, 'outputFiles')
        netFolderName = os.path.join(path_Temp, folderName)
        netFolderName = os.path.join(netFolderName, 'Networks')
        dirMake(netFolderName)

        modelFileName = netFolderName + "/FCN_Epoch" + str(e_i + inIter + 1)
        torch.save(net, modelFileName)

        strFinal = " Network model saved in " + netFolderName + " as FCN_Epoch" + str(
            e_i + inIter + 1)
        print(strFinal)

    print("................ The whole Training is done..............")
    print(
        " ************************************************************************************ "
    )
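
# MyDataset is used by the DataLoader above but is not defined in this example.
# A minimal compatible sketch (an assumption, not the original implementation)
# that pairs each image patch with its ground-truth patch:
from torch.utils.data import Dataset

class MyDataset(Dataset):
    def __init__(self, images, labels, transform=None):
        self.images = images          # numpy array of image patches
        self.labels = labels          # numpy array of ground-truth patches
        self.transform = transform    # e.g. transforms.ToTensor(), as passed above

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        image, label = self.images[idx], self.labels[idx]
        if self.transform is not None:
            image = self.transform(image)
        return image, label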
Example #9
def startTesting(networkModelName,
                 configIniName
                 ) :

    padInputImagesBool = True # from config ini
    print " ******************************************  STARTING SEGMENTATION ******************************************"

    print " **********************  Starting segmentation **********************"
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,2)
    

    print " -------- Images to segment -------------"

    print " -------- Reading Images names for segmentation -------------"
    
    # -- Get list of images used for testing -- #
    (imageNames_Test, names_Test) = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesToSegment)  # Images
    (imageNames_Test_Bottom, names_Test_Bottom) = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesToSegment) # Ground truth
    (roiNames_Test, roi_names_Test) = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesToSegment) # ROI

    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaSemiDenseNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Get info from the network model
    networkName        = myLiviaSemiDenseNet3D.networkName
    folderName         = myLiviaSemiDenseNet3D.folderName
    n_classes          = myLiviaSemiDenseNet3D.n_classes
    sampleSize_Test    = myLiviaSemiDenseNet3D.sampleSize_Test
    receptiveField     = myLiviaSemiDenseNet3D.receptiveField
    outputShape        = myLiviaSemiDenseNet3D.lastLayer.outputShapeTest[2:]
    batch_Size         = myLiviaSemiDenseNet3D.batch_Size
    padInputImagesBool = myParserConfigIni.applyPadding
    imageType          = myParserConfigIni.imageTypes
    numberImagesToSegment = len(imageNames_Test)
    
    strideValues = myLiviaSemiDenseNet3D.lastLayer.outputShapeTest[2:]

    # Run over the images to segment   
    for i_d in xrange(numberImagesToSegment) :
        print("**********************  Segmenting subject: {} ....total: {}/{}...**********************".format(names_Test[i_d],str(i_d+1),str(numberImagesToSegment)))
        
        segmentVolume(myLiviaSemiDenseNet3D,
                  i_d,
                  imageNames_Test,  # Full path
                  imageNames_Test_Bottom,
                  names_Test,       # Only image name
                  groundTruthNames_Test,
                  roiNames_Test,
                  imageType,
                  padInputImagesBool,
                  receptiveField, 
                  sampleSize_Test,
                  strideValues,
                  batch_Size,
                  1 # Validation (0) or testing (1)
                  )
                         
       
    print(" **************************************************************************************************** ")