Example #1
def startTesting(FCNname,configIniName) :

    #padInputImagesBool = True # from config ini
    print (" ******************************************  STARTING SEGMENTATION ******************************************")

    print (" **********************  Starting segmentation **********************")
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,2)
    

    print (" -------- Images to segment -------------")

    print (" -------- Reading Images names for segmentation -------------")
    
    # -- Get list of images used for testing -- #
    (imageNames_Test, names_Test) = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesToSegment) # Ground truth

    print (" ================== Images for training ================")
    for i in range(0,len(names_Test)):
        print(" Image({}): {}  |  GT: {}  ".format(i,names_Test[i], gt_names_Test[i] ))

    folderName            = myParserConfigIni.folderName
    batch_size            = myParserConfigIni.batch_size
    sampleSize_Test       = myParserConfigIni.Patch_Size_Test
    imageType             = myParserConfigIni.imageTypes
    strideValues          = myParserConfigIni.strideValues
    numberOfClass         = myParserConfigIni.n_classes
    numberImagesToSegment = len(imageNames_Test)

    # --------------- Load my FCN object  --------------- 
    print (" ... Loading model from {}".format(FCNname))
    model = torch.load(FCNname)
 
    print (" ... Network architecture successfully loaded....")

    for i_d in range(numberImagesToSegment) :
        print("**********************  Segmenting subject: {} ....total: {}/{}...**********************".format(names_Test[i_d],str(i_d+1),str(numberImagesToSegment)))
        
        segmentVolume(model,
                      folderName,
                      i_d,
                      imageNames_Test,
                      names_Test,
                      groundTruthNames_Test,
                      imageType,
                      sampleSize_Test,
                      strideValues,
                      numberOfClass,
                      batch_size,
                      1 # Validation (0) or testing (1)
                      )      
       
    print(" **************************************************************************************************** ")
def startTraining(networkModelName,configIniName):
    print (" ************************************************  STARTING TRAINING **************************************************")
    print (" **********************  Starting training model (Reading parameters) **********************")

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print ("-------- Reading Images names used in training/validation -------------")
##-----##
    # from sklearn.model_selection import KFold
    # import numpy as np
    # y1 = myParserConfigIni.indexesForTraining
    # #x1 = myParserConfigIni.indexesForValidation
    # kf = KFold(n_splits= 5)
    #
    # for train_index, test_index in kf.split(y1):
    #     print("TRAIN:", train_index, "TEST:", test_index)
    #     y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import LeavePOut
    # lpo = LeavePOut(p=5)
    # y1 = myParserConfigIni.indexesForTraining
    # for train, test in lpo.split(y1):
    #     y, x = np.array(y1)[train], np.array(y1)[test]
##-----train##
    from sklearn.cross_validation import LeaveOneOut
    loo = LeaveOneOut(4)
    y1 = myParserConfigIni.indexesForTraining
    x1 = myParserConfigIni.indexesForValidation
    for train_index, test_index in loo:
        print("TRAIN:", train_index, "TEST:", test_index)
        y, x = np.array(y1)[train_index], np.array(y1)[test_index]
##-----##
    # from sklearn.model_selection import train_test_split
    #     X_train, X_test, Y_train, Y_test = train_test_split(DataX, DataY, test_size=0.2)
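    ##-----##
    # A minimal sketch of the same leave-one-out split with the maintained scikit-learn API
    # (assumption: scikit-learn >= 0.18; sklearn.cross_validation was removed in later releases).
    # Kept commented out, like the alternatives above, so it does not alter the active split:
    # from sklearn.model_selection import LeaveOneOut
    # loo = LeaveOneOut()
    # for train_index, test_index in loo.split(np.array(y1)):
    #     print("TRAIN:", train_index, "TEST:", test_index)
    #     y, x = np.array(y1)[train_index], np.array(y1)[test_index]
    ##-----##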

    # -- Get list of images used for training -- #

    (imageNames_Train, names_Train)          = getImagesSet(myParserConfigIni.imagesFolder,y)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(myParserConfigIni.GroundTruthFolder,y) # Ground truth
    (roiNames_Train, roi_names_Train)        = getImagesSet(myParserConfigIni.ROIFolder,y) # ROI

    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)          = getImagesSet(myParserConfigIni.imagesFolder,x)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(myParserConfigIni.GroundTruthFolder,x) # Ground truth
    (roiNames_Val, roi_names_Val)        = getImagesSet(myParserConfigIni.ROIFolder,x) # ROI

    # Print names
    print (" ================== Images for training ================")
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Train[i], gt_names_Train[i] ))
    print (" ================== Images for validation ================")
    for i in range(0,len(names_Val)):
        if len(roi_names_Train) > 0:
            print(" Image({}): {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): {}  |  GT: {}  ".format(i,names_Val[i], gt_names_Val[i]))
    print (" ===============================================================")
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print (" ... Network architecture successfully loaded....")

    # Assign parameters to loaded Net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size 
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Include the epoch cost to the main training cost and update current mean 
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#
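        # Schedule used below (summarizing the author's logic): the first time e_i reaches
        # firstEpochChangeLR the learning rate is halved, and it is halved again every
        # frequencyChangeLR epochs after that.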

        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ In this point the training is done at Epoch n ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print(strFinal)

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
def startTesting(networkModelName, configIniName):

    padInputImagesBool = True  # from config ini
    print " ******************************************  STARTING SEGMENTATION ******************************************"

    print " **********************  Starting segmentation **********************"
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName, 2)

    print " -------- Images to segment -------------"

    print " -------- Reading Images names for segmentation -------------"

    # -- Get list of images used for testing -- #
    (imageNames_Test,
     names_Test) = getImagesSet(myParserConfigIni.imagesFolder,
                                myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesToSegment)  # Ground truth
    (roiNames_Test,
     roi_names_Test) = getImagesSet(myParserConfigIni.ROIFolder,
                                    myParserConfigIni.indexesToSegment)  # ROI

    # --------------- Load my LiviaNet3D object  ---------------
    print(" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Get info from the network model
    networkName = myLiviaNet3D.networkName
    folderName = myLiviaNet3D.folderName
    n_classes = myLiviaNet3D.n_classes
    sampleSize_Test = myLiviaNet3D.sampleSize_Test
    receptiveField = myLiviaNet3D.receptiveField
    outputShape = myLiviaNet3D.lastLayer.outputShapeTest[2:]
    batch_Size = myLiviaNet3D.batch_Size
    padInputImagesBool = myParserConfigIni.applyPadding
    imageType = myParserConfigIni.imageTypes
    numberImagesToSegment = len(imageNames_Test)

    strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]

    # Run over the images to segment
    for i_d in xrange(numberImagesToSegment):
        print(
            "**********************  Segmenting subject: {} ....total: {}/{}...**********************"
            .format(names_Test[i_d], str(i_d + 1), str(numberImagesToSegment)))

        segmentVolume(
            myLiviaNet3D,
            i_d,
            imageNames_Test,  # Full path
            names_Test,  # Only image name
            groundTruthNames_Test,
            roiNames_Test,
            imageType,
            padInputImagesBool,
            receptiveField,
            sampleSize_Test,
            strideValues,
            batch_Size,
            1  # Validation (0) or testing (1)
        )

    print(
        " **************************************************************************************************** "
    )
Example #4
def startTraining(networkModelName,configIniName):
    print " ************************************************  STARTING TRAINING **************************************************"
    print " **********************  Starting training model (Reading parameters) **********************"

    myParserConfigIni = parserConfigIni()
   
    myParserConfigIni.readConfigIniFile(configIniName,1)
    
    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print (" --- Do training in {} epochs with {} subEpochs each...".format(myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print "-------- Reading Images names used in training/validation -------------"

    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train)                = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForTraining)  # Images
    (imageNames_Train_Bottom, names_Train_Bottom)  = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForTraining)  # Images
    (groundTruthNames_Train, gt_names_Train)       = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForTraining) # Ground truth
    (roiNames_Train, roi_names_Train)              = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForTraining) # ROI
    
    # -- Get list of images used for validation -- #
    (imageNames_Val, names_Val)               = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesForValidation)  # Images
    (imageNames_Val_Bottom, names_Val_Bottom) = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesForValidation)  # Images (bottom pathway)
    (groundTruthNames_Val, gt_names_Val)      = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesForValidation) # Ground truth
    (roiNames_Val, roi_names_Val)             = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesForValidation) # ROI

    # Print names
    print " ================== Images for training ================"
    for i in range(0,len(names_Train)):
       if len(roi_names_Train) > 0:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  |  ROI {} ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i], roi_names_Train[i] ))
       else:
            print(" Image({}): Top {}  |  Bottom: {}  |  GT: {}  ".format(i,names_Train[i], names_Train_Bottom[i], gt_names_Train[i] ))
    print " ================== Images for validation ================"
    for i in range(0,len(names_Val)):
        if len(roi_names_Train) > 0:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  |  ROI {} ".format(i,names_Val[i], names_Val_Bottom[i], gt_names_Val[i], roi_names_Val[i] ))
        else:
            print(" Image({}): Top {}  |  Bottom  {}  |  GT: {}  ".format(i,names_Val[i],  names_Val_Bottom[i], gt_names_Val[i]))
    print " ==============================================================="
   
    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Assign parameters to loaded Net
    myLiviaNet3D.numberOfEpochs = myParserConfigIni.numberOfEpochs
    myLiviaNet3D.numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    myLiviaNet3D.numberOfSamplesSupEpoch  = myParserConfigIni.numberOfSamplesSupEpoch
    myLiviaNet3D.firstEpochChangeLR  = myParserConfigIni.firstEpochChangeLR
    myLiviaNet3D.frequencyChangeLR  = myParserConfigIni.frequencyChangeLR
    
    numberOfEpochs = myLiviaNet3D.numberOfEpochs
    numberOfSubEpochs = myLiviaNet3D.numberOfSubEpochs
    numberOfSamplesSupEpoch = myLiviaNet3D.numberOfSamplesSupEpoch
    
    # --------------- --------------  --------------- 
    # --------------- Start TRAINING  --------------- 
    # --------------- --------------  --------------- 
    # Get sample dimension values
    receptiveField = myLiviaNet3D.receptiveField
    sampleSize_Train = myLiviaNet3D.sampleSize_Train

    trainingCost = []

    if myParserConfigIni.applyPadding == 1:
        applyPadding = True
    else:
        applyPadding = False
    
    learningRateModifiedEpoch = 0
    
    # Run over all the (remaining) epochs and subepochs
    for e_i in xrange(numberOfEpochs):
        # Recover last trained epoch
        numberOfEpochsTrained = myLiviaNet3D.numberOfEpochsTrained
                                        
        print(" ============== EPOCH: {}/{} =================".format(numberOfEpochsTrained+1,numberOfEpochs))

        costsOfEpoch = []
        
        for subE_i in xrange(numberOfSubEpochs): 
            epoch_nr = subE_i+1
            print (" --- SubEPOCH: {}/{}".format(epoch_nr,myLiviaNet3D.numberOfSubEpochs))

            # Get all the samples that will be used in this sub-epoch
            [imagesSamplesAll,
            imagesSamplesAll_Bottom,
            gt_samplesAll] = getSamplesSubepoch(numberOfSamplesSupEpoch,
                                                imageNames_Train,
                                                imageNames_Train_Bottom,
                                                groundTruthNames_Train,
                                                roiNames_Train,
                                                imageType,
                                                sampleSize_Train,
                                                receptiveField,
                                                applyPadding
                                                )

            # Variable that will contain weights for the cost function
            # --- In its current implementation, all the classes have the same weight
            weightsCostFunction = np.ones(myLiviaNet3D.n_classes, dtype='float32')
               
            numberBatches = len(imagesSamplesAll) / myLiviaNet3D.batch_Size 
            
            myLiviaNet3D.trainingData_x.set_value(imagesSamplesAll, borrow=True)
            myLiviaNet3D.trainingData_x_Bottom.set_value(imagesSamplesAll_Bottom, borrow=True)
            myLiviaNet3D.trainingData_y.set_value(gt_samplesAll, borrow=True)
                 
            costsOfBatches = []
            evalResultsSubepoch = np.zeros([ myLiviaNet3D.n_classes, 4 ], dtype="int32")
    
            for b_i in xrange(numberBatches):
                # TODO: Make a line that adds a point at each trained batch (Or percentage being updated)
                costErrors = myLiviaNet3D.networkModel_Train(b_i, weightsCostFunction)
                meanBatchCostError = costErrors[0]
                costsOfBatches.append(meanBatchCostError)
                myLiviaNet3D.updateLayersMatricesBatchNorm() 

            
            #======== Calculate and Report accuracy over subepoch
            meanCostOfSubepoch = sum(costsOfBatches) / float(numberBatches)
            print(" ---------- Cost of this subEpoch: {}".format(meanCostOfSubepoch))
            
            # Release data
            myLiviaNet3D.trainingData_x.set_value(np.zeros([1,1,1,1,1], dtype="float32"))
            myLiviaNet3D.trainingData_y.set_value(np.zeros([1,1,1,1], dtype="float32"))

            # Get mean cost epoch
            costsOfEpoch.append(meanCostOfSubepoch)

        meanCostOfEpoch =  sum(costsOfEpoch) / float(numberOfSubEpochs)
        
        # Include the epoch cost to the main training cost and update current mean 
        trainingCost.append(meanCostOfEpoch)
        currentMeanCost = sum(trainingCost) / float(e_i + 1)
        
        print(" ---------- Training on Epoch #" + str(e_i) + " finished ----------" )
        print(" ---------- Cost of Epoch: {} / Mean training error {}".format(meanCostOfEpoch,currentMeanCost))
        print(" -------------------------------------------------------- " )
        
        # ------------- Update Learning Rate if required ----------------#
        if e_i >= myLiviaNet3D.firstEpochChangeLR :
            if learningRateModifiedEpoch == 0:
                currentLR = myLiviaNet3D.learning_rate.get_value()
                newLR = currentLR / 2.0
                myLiviaNet3D.learning_rate.set_value(newLR)
                print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                learningRateModifiedEpoch = e_i
            else:
                if (e_i) == (learningRateModifiedEpoch + myLiviaNet3D.frequencyChangeLR):
                    currentLR = myLiviaNet3D.learning_rate.get_value()
                    newLR = currentLR / 2.0
                    myLiviaNet3D.learning_rate.set_value(newLR)
                    print(" ... Learning rate has been changed from {} to {}".format(currentLR, newLR))
                    learningRateModifiedEpoch = e_i
                
        # ---------------------- Start validation ---------------------- #
        
        numberImagesToSegment = len(imageNames_Val)
        print(" ********************** Starting validation **********************")

        # Run over the images to segment   
        for i_d in xrange(numberImagesToSegment) :
            print("-------------  Segmenting subject: {} ....total: {}/{}... -------------".format(names_Val[i_d],str(i_d+1),str(numberImagesToSegment)))
            strideValues = myLiviaNet3D.lastLayer.outputShapeTest[2:]
            
            segmentVolume(myLiviaNet3D,
                          i_d,
                          imageNames_Val,  # Full path
                          imageNames_Val_Bottom,
                          names_Val,       # Only image name
                          groundTruthNames_Val,
                          roiNames_Val,
                          imageType,
                          applyPadding,
                          receptiveField, 
                          sampleSize_Train,
                          strideValues,
                          myLiviaNet3D.batch_Size,
                          0 # Validation (0) or testing (1)
                          )
                         
       
        print(" ********************** Validation DONE ********************** ")

        # ------ In this point the training is done at Epoch n ---------#
        # Increase number of epochs trained
        myLiviaNet3D.numberOfEpochsTrained += 1

        #  --------------- Save the model --------------- 
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR,'outputFiles')
        netFolderName = os.path.join(path_Temp,myLiviaNet3D.folderName)
        netFolderName  = os.path.join(netFolderName,'Networks')

        modelFileName = netFolderName + "/" + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        dump_model_to_gzip_file(myLiviaNet3D, modelFileName)
 
        strFinal =  " Network model saved in " + netFolderName + " as " + myLiviaNet3D.networkName + "_Epoch" + str (myLiviaNet3D.numberOfEpochsTrained)
        print strFinal

    print("................ The whole Training is done.....")
    print(" ************************************************************************************ ")
Example #5
def startTraining(model, configIniName, inIter):
    print(
        " ************************************************  STARTING TRAINING **************************************************"
    )
    print(
        " **********************  Starting training model (Reading parameters) **********************"
    )

    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName, 1)

    # Image type (0: Nifti, 1: Matlab)
    imageType = myParserConfigIni.imageTypesTrain

    print(" --- Do training in {} epochs with {} subEpochs each...".format(
        myParserConfigIni.numberOfEpochs, myParserConfigIni.numberOfSubEpochs))
    print(
        "-------- Reading Images names used in training/validation -------------"
    )

    # -- Get list of images used for training -- #
    (imageNames_Train, names_Train) = getImagesSet(
        myParserConfigIni.imagesFolder,
        myParserConfigIni.indexesForTraining)  # Images
    (groundTruthNames_Train, gt_names_Train) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesForTraining)  # Ground truth

    # Print names
    print(" ================== Images for training ================")
    for i in range(0, len(names_Train)):
        print(" Image({}): {}  |  GT: {}  ".format(i, names_Train[i],
                                                   gt_names_Train[i]))

    numberOfEpochs = myParserConfigIni.numberOfEpochs
    numberOfSubEpochs = myParserConfigIni.numberOfSubEpochs
    numberOfSamplesSupEpoch = myParserConfigIni.numberOfSamplesSupEpoch
    numberOfClass = myParserConfigIni.n_classes
    sampleSize_Train = myParserConfigIni.Patch_Size_Train
    batch_size = myParserConfigIni.batch_size
    folderName = myParserConfigIni.folderName
    timeForValidation = myParserConfigIni.timeForValidation

    # -- Get list of images used for validation -- #
    myParserConfigIni.readConfigIniFile(configIniName, 2)
    sampleSize_Test = myParserConfigIni.Patch_Size_Train
    strideValues = myParserConfigIni.strideValues

    (imageNames_Val,
     names_Val) = getImagesSet(myParserConfigIni.imagesFolder,
                               myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Val, gt_names_Val) = getImagesSet(
        myParserConfigIni.GroundTruthFolder,
        myParserConfigIni.indexesToSegment)  # Ground truth

    # Print names
    print(" ================== Images for validation ================")
    for i in range(0, len(names_Val)):
        print(" Image({}): {}  |  GT: {}  ".format(i, names_Val[i],
                                                   gt_names_Val[i]))

    print(" ===============================================================")

    optimizer = optim.Adam(model.parameters(), weight_decay=0, lr=1e-5)
    if torch.cuda.is_available():
        print('============================')
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        net = torch.nn.DataParallel(model)
        net.cuda()
    else:
        # Without a GPU, fall back to the plain model so 'net' is always defined
        net = model

    AvDice_Val = 0

    for e_i in range(numberOfEpochs):
        # Recover last trained epoch
        print(" ============== EPOCH: {}/{} =================".format(
            e_i, numberOfEpochs))
        costsOfEpoch = []
        for subE_i in range(numberOfSubEpochs):
            #epoch_nr = subE_i+1
            print(" --- SubEPOCH: {}/{}".format(subE_i, numberOfSubEpochs))
            # Get all the samples that will be used in this sub-epoch

            [imagesSamplesAll, gt_samplesAll
             ] = getSamplesSubepoch(numberOfSamplesSupEpoch, imageNames_Train,
                                    groundTruthNames_Train, imageType,
                                    sampleSize_Train)
            imagesSamplesAll = np.array(imagesSamplesAll)
            gt_samplesAll = np.array(gt_samplesAll)
            train_data = MyDataset(imagesSamplesAll,
                                   gt_samplesAll,
                                   transform=transforms.ToTensor())
            train_loader = DataLoader(train_data,
                                      batch_size,
                                      shuffle=True,
                                      num_workers=8)

            train(net, train_loader, optimizer, numberOfClass, subE_i)

        Dice = []
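        # Early stopping on the mean validation Dice (summarizing the author's logic below):
        # once validation starts, training continues only while the epoch's mean Dice is at
        # least the best value seen so far and stops otherwise; note that the 'continue'
        # branch also skips the checkpointing step at the end of the epoch.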
        if e_i + inIter > timeForValidation:
            numberImagesToSegment = len(imageNames_Val)
            print(
                " ********************** Starting validation **********************"
            )
            # Run over the images to segment

            for i_d in range(numberImagesToSegment):
                print(
                    "**********************  Segmenting subject: {} ....total: {}/{}...**********************"
                    .format(names_Val[i_d], str(i_d + 1),
                            str(numberImagesToSegment)))

                [everyROIDice, tmpDice] = segmentVolume(
                    net,
                    folderName,
                    i_d,
                    imageNames_Val,
                    names_Val,
                    groundTruthNames_Val,
                    imageType,
                    sampleSize_Test,
                    strideValues,
                    numberOfClass,
                    batch_size,
                    0  # Validation (0) or testing (1)
                )

                Dice.append(tmpDice)
            print(
                " ********************** Validation DONE ********************** "
            )

            if sum(Dice) / len(Dice) >= AvDice_Val:
                AvDice_Val = sum(Dice) / len(Dice)
                continue
            else:
                break
        #  --------------- Save the model ---------------
        BASE_DIR = os.getcwd()
        path_Temp = os.path.join(BASE_DIR, 'outputFiles')
        netFolderName = os.path.join(path_Temp, folderName)
        netFolderName = os.path.join(netFolderName, 'Networks')
        dirMake(netFolderName)

        modelFileName = netFolderName + "/FCN_Epoch" + str(e_i + inIter + 1)
        torch.save(net, modelFileName)

        strFinal = " Network model saved in " + netFolderName + " as FCN_Epoch" + str(
            e_i + inIter + 1)
        print(strFinal)

    print("................ The whole Training is done..............")
    print(
        " ************************************************************************************ "
    )
Example #6
def startTesting(networkModelName,
                 configIniName
                 ) :

    padInputImagesBool = True # from config ini
    print " ******************************************  STARTING SEGMENTATION ******************************************"

    print " **********************  Starting segmentation **********************"
    myParserConfigIni = parserConfigIni()
    myParserConfigIni.readConfigIniFile(configIniName,2)
    

    print " -------- Images to segment -------------"

    print " -------- Reading Images names for segmentation -------------"
    
    # -- Get list of images used for testing -- #
    (imageNames_Test, names_Test) = getImagesSet(myParserConfigIni.imagesFolder,myParserConfigIni.indexesToSegment)  # Images
    (imageNames_Test_Bottom, names_Test_Bottom) = getImagesSet(myParserConfigIni.imagesFolder_Bottom,myParserConfigIni.indexesToSegment)  # Images
    (groundTruthNames_Test, gt_names_Test) = getImagesSet(myParserConfigIni.GroundTruthFolder,myParserConfigIni.indexesToSegment) # Ground truth
    (roiNames_Test, roi_names_Test) = getImagesSet(myParserConfigIni.ROIFolder,myParserConfigIni.indexesToSegment) # ROI

    # --------------- Load my LiviaNet3D object  --------------- 
    print (" ... Loading model from {}".format(networkModelName))
    myLiviaSemiDenseNet3D = load_model_from_gzip_file(networkModelName)
    print " ... Network architecture successfully loaded...."

    # Get info from the network model        
    networkName        = myLiviaSemiDenseNet3D.networkName
    folderName         = myLiviaSemiDenseNet3D.folderName
    n_classes          = myLiviaSemiDenseNet3D.n_classes
    sampleSize_Test    = myLiviaSemiDenseNet3D.sampleSize_Test
    receptiveField     = myLiviaSemiDenseNet3D.receptiveField
    outputShape        = myLiviaSemiDenseNet3D.lastLayer.outputShapeTest[2:]
    batch_Size         = myLiviaSemiDenseNet3D.batch_Size
    padInputImagesBool = myParserConfigIni.applyPadding
    imageType          = myParserConfigIni.imageTypes
    numberImagesToSegment = len(imageNames_Test)
    
    strideValues = myLiviaSemiDenseNet3D.lastLayer.outputShapeTest[2:]

    # Run over the images to segment   
    for i_d in xrange(numberImagesToSegment) :
        print("**********************  Segmenting subject: {} ....total: {}/{}...**********************".format(names_Test[i_d],str(i_d+1),str(numberImagesToSegment)))
        
        segmentVolume(myLiviaSemiDenseNet3D,
                  i_d,
                  imageNames_Test,  # Full path
                  imageNames_Test_Bottom,
                  names_Test,       # Only image name
                  groundTruthNames_Test,
                  roiNames_Test,
                  imageType,
                  padInputImagesBool,
                  receptiveField, 
                  sampleSize_Test,
                  strideValues,
                  batch_Size,
                  1 # Validation (0) or testing (1)
                  )
                         
       
    print(" **************************************************************************************************** ")