Example #1
def test(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32,
                               name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32,
                           shape=[None, None, None, 3],
                           name="input_image")
    pred_annotation, logits = inference(image, keep_prob)
    TestReader = Data_Reader.Data_Reader(Pred_Dir)
    sess1 = tf.Session()  #Start Tensorflow session

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess1.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess1, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir +
              ". See Train.py for creating the training network.")
        sys.exit()
    print("Running Predictions:")
    print("Saving output to:" + Pred_Dir)
    Images = TestReader.ReadNextBatchClean()
    LabelPred = sess1.run(pred_annotation,
                          feed_dict={
                              image: Images,
                              keep_prob: 1.0
                          })
    pred = np.squeeze(LabelPred)
    print(pred.shape)
    plt.imshow(pred)
    plt.show()  # required to display the figure outside interactive backends
Example #2
def main(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    # .........................Placeholders for input image and labels...........................................................................................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels
    # .........................Build FCN Net...............................................................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
    # -------------------------Data reader for validation image-----------------------------------------------------------------------------------------------------------------------------

    ValidReader = Data_Reader.Data_Reader(Image_Dir,GTLabelDir=Label_Dir, BatchSize=Batch_Size) # build reader that will be used to load images and labels from validation set

    #........................................................................................................................
    sess = tf.Session()  # Start Tensorflow session
    #--------Load trained model--------------------------------------------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir + ". See Train.py for training.")
        sys.exit()
 #--------------------Sum of intersection from all validation images for all classes and sum of union for all images and all classes----------------------------------------------------------------------------------
    Union = np.float64(np.zeros(len(Classes))) #Sum of union
    Intersection =  np.float64(np.zeros(len(Classes))) #Sum of Intersection
    fim = 0
    print("Start Evaluating intersection over union for "+str(ValidReader.NumFiles)+" images")
 #===========================Go over all validation images and calculate IOU============================================================
    while (ValidReader.itr<ValidReader.NumFiles):
        print(str(fim*100.0/ValidReader.NumFiles)+"%")
        fim+=1

#.........................................Run prediction/inference on validation................................................................................
        Images,  GTLabels = ValidReader.ReadNextBatchClean()  # Read images  and ground truth annotation
        #Predict annotation using net
        PredictedLabels = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
#............................Calculate Intersection and union for prediction...............................................................

#        print("-------------------------IOU----------------------------------------")
        CIOU, CU = IOU.GetIOU(PredictedLabels, GTLabels.squeeze(), len(Classes), Classes)  # Calculate intersection over union
        Intersection += CIOU * CU
        Union += CU

#-----------------------------------------Print results--------------------------------------------------------------------------------------
    print("---------------------------Mean Prediction----------------------------------------")
    print("---------------------IOU=Intersection Over Inion----------------------------------")
    for i in range(len(Classes)):
        if Union[i]>0: print(Classes[i]+"\t"+str(Intersection[i]/Union[i]))
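A note on IOU.GetIOU, which is a project-local helper not shown on this page: from the way its outputs are accumulated above (Intersection += CIOU*CU, Union += CU, final score Intersection/Union), it evidently returns the per-class IOU together with the per-class union count, so that the running totals recover the dataset-level IOU. A minimal numpy sketch under that assumption:

import numpy as np

def get_iou(pred, gt, num_classes, class_names):
    # Sketch of the assumed IOU.GetIOU contract: per-class IOU and per-class union for one batch.
    # class_names is kept to mirror the GetIOU signature; it is unused in this sketch.
    ciou = np.zeros(num_classes, dtype=np.float64)
    cu = np.zeros(num_classes, dtype=np.float64)
    for c in range(num_classes):
        inter = np.sum((pred == c) & (gt == c))
        union = np.sum((pred == c) | (gt == c))
        cu[c] = union
        ciou[c] = inter / union if union > 0 else 0.0
    return ciou, cu

Accumulating CIOU * CU and CU across batches and dividing at the end yields sum(intersection) / sum(union) per class over the whole validation set, rather than an average of per-batch IOUs.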
def main(argv=None):
      # .........................Placeholders for input image and labels........................................................................
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels

    # -------------------------Build Net----------------------------------------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images-----------------------------------------------------------------------------------------------------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir,  BatchSize=1)
    #-------------------------Load trained model; if you don't have a trained model, see Train.py-----------------------------------------------------------------------------------------------------------------------------

    sess = tf.Session() #Start Tensorflow session

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir + ". See Train.py for creating the training network.")
        sys.exit()

#--------------------Create output directories for predicted labels, one folder for each granularity of label prediction---------------------------------------------------------------------------------------------------------------------------------------------

    if not os.path.exists(Pred_Dir): os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir+"/OverLay"): os.makedirs(Pred_Dir+"/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"): os.makedirs(Pred_Dir + "/Label")

    
    print("Running Predictions:")
    print("Saving output to:" + Pred_Dir)
 #----------------------Go over all images and predict semantic segmentation at various class granularities-------------------------------------------------------------
    fim = 0
    print("Start Predicting " + str(ValidReader.NumFiles) + " images")
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1
        # ..................................Load image.......................................................................................
        FileName=ValidReader.OrderedFiles[ValidReader.itr] #Get input image name
        Images = ValidReader.ReadNextBatchClean()  # load testing image

        # Predict annotation using net
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        #------------------------Save predicted labels overlaid on images---------------------------------------------------------------------------------------------
        misc.imsave(Pred_Dir + "/OverLay/" + FileName + NameEnd, Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))  # Overlay label on image
        misc.imsave(Pred_Dir + "/Label/" + FileName[:-4] + ".png" + NameEnd, LabelPred[0].astype(np.uint8))
Example #4
def main(argv=None):
    tf.reset_default_graph()
#.........................Placeholders for input image and labels...........................................................................................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels
    GTLabel = tf.placeholder(tf.float32, shape=[None, 3], name="GTLabel")  # One-hot ground-truth labels for training (float, as required by softmax_cross_entropy_with_logits)
  #.........................Build FCN Net...............................................................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    feature = Net.build(image)  # Create the net and load initial weights
#......................................Get loss functions for the neural network, one loss function for each set of labels....................................................................................................
    res = tf.placeholder(tf.float32, shape=[None, 3, 4, 512], name="res")  # VGG16 feature maps fed to the classifier (name fixed from the copy-pasted "input_image")
    c = C.Classifier(res)
    Loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=GTLabel,logits=c.classify(),name="Loss"))  # Define loss function for training

   #....................................Create solver for the net............................................................................................
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(Loss)
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir) #Reader for training data
    sess = tf.Session() #Start Tensorflow session
# -------------load trained model if exist-----------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer()) #Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if train model exist restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
#--------------------------- Create files for saving loss----------------------------------------------------------------------------------------------------------

    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tloss\t Learning Rate="+str(learning_rate))
    f.close()
#..............Start Training loop: Main Training....................................................................
    for itr in range(MAX_ITERATION):
        print "itr:", itr
        Images,  GTLabels = TrainReader.getBatch() # Load  augmeted images and ground true labels for training
        feed_dict = {image:Images}
        output = sess.run(feature, feed_dict=feed_dict)
        feed_dict = {res:output,GTLabel:GTLabels}
        _, loss = sess.run([optimizer,Loss], feed_dict=feed_dict) # Train one cycle
        print "loss is,", loss
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
        if itr % 230 == 0 and itr>0:
            print("Saving Model to file in "+logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr) #Save model

#......................Write and display train loss..........................................................................
    def generate(self,x):
        data = Data_Reader.read_student(x)
        self.root = Tk()
        self.root.title("Student View Window")
        temp_label = Label(self.root,text=(data[1]+" "+data[2]+"    "+ data[0]))
        temp_label.pack()
        temp_label = Label(self.root,text=("Grade: "+data[3]))
        temp_label.pack()
        temp_label = Label(self.root,text=("Clubs: "+str(len(data[4]))))
        temp_label.pack()
        var1 = IntVar()
        num=0
        dec=0
        for i in range(len(data[4])):
            perc=data[4][i][1].split("/")
            num+=float(perc[0])
            dec+=float(perc[1])
            perc=round((float(perc[0])/float(perc[1]))*100,2)

            temp_label = Label(self.root,text=(str(data[4][i][0]+ " " + str(perc))+"%"))
            temp_label.pack()
        temp_label = Label(self.root,text=("Average Attendance: " + str(round((num/dec)*100.0,2))+"%"))
        temp_label.pack()
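For concreteness, the attendance math above aggregates raw session counts before dividing, so the result is attendance-weighted rather than a mean of per-club percentages. A small self-contained check (the "3/4" and "7/10" records are made up for illustration):

records = ["3/4", "7/10"]                            # per-club "attended/total" strings, like data[4][i][1]
num = sum(float(r.split("/")[0]) for r in records)   # 10.0 sessions attended
dec = sum(float(r.split("/")[1]) for r in records)   # 14.0 sessions total
print(round((num / dec) * 100.0, 2))                 # 71.43 -- not the mean of 87.5 and 70.0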
def main(argv=None):
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3],
                           name="input_image")  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels
    VesselLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1],
                                 name="VesselLabel")  # Label image for vessel background prediction
    # -------------------------Build Net----------------------------------------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, VesselLabel, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images-----------------------------------------------------------------------------------------------------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, Label_Dir, 1)

    #-------------------------Load trained model; if you don't have a trained model, see Train.py-----------------------------------------------------------------------------------------------------------------------------

    sess = tf.Session() #Start Tensorflow session

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir + ". See Train.py for creating the training network,")
        print("or download a pretrained model from "
              "https://drive.google.com/file/d/0B6njwynsu2hXRFpmY1pOV1A4SFE/view?usp=sharing "
              "and extract it in logs_dir")
        sys.exit()


#--------------------Create output directories for predicted labels, one folder for each granularity of label prediction---------------------------------------------------------------------------------------------------------------------------------------------

    if not os.path.exists(Pred_Dir): os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir+"/OverLay"): os.makedirs(Pred_Dir+"/OverLay")
    if not os.path.exists(Pred_Dir + "/OverLay/Vessel/"): os.makedirs(Pred_Dir + "/OverLay/Vessel/")
    if not os.path.exists(Pred_Dir + "/OverLay/OnePhase/"): os.makedirs(Pred_Dir + "/OverLay/OnePhase/")
    if not os.path.exists(Pred_Dir + "/OverLay/LiquiSolid/"): os.makedirs(Pred_Dir + "/OverLay/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/OverLay/ExactPhase/"): os.makedirs(Pred_Dir + "/OverLay/ExactPhase/")
    if not os.path.exists(Pred_Dir + "/Label"): os.makedirs(Pred_Dir + "/Label")
    if not os.path.exists(Pred_Dir + "/Label/Vessel/"): os.makedirs(Pred_Dir + "/Label/Vessel/")
    if not os.path.exists(Pred_Dir + "/Label/OnePhase/"): os.makedirs(Pred_Dir + "/Label/OnePhase/")
    if not os.path.exists(Pred_Dir + "/Label/LiquiSolid/"): os.makedirs(Pred_Dir + "/Label/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/Label/ExactPhase/"): os.makedirs(Pred_Dir + "/Label/ExactPhase/")
    if not os.path.exists(Pred_Dir + "/AllPredicitionsDisplayed/"): os.makedirs(Pred_Dir + "/AllPredicitionsDisplayed/")
    
    print("Running Predictions:")
    print("Saving output to:" + Pred_Dir)
 #----------------------Go over all images and predict semantic segmentation at various class granularities-------------------------------------------------------------
    fim = 0
    print("Start Predicting " + str(ValidReader.NumFiles) + " images")
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1

        # ..................................Load image.......................................................................................
        FileName=ValidReader.OrderedFiles[ValidReader.itr]
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsExactPhase = ValidReader.ReadNextBatchClean()  # Read images and ground truth annotation
        # Predict annotation using net
        ExactPhase, LiquidSolid, OnePhase, Vessel = sess.run(
            [Net.ExactPhasePred, Net.LiquidSolidPred, Net.PhasePred, Net.VesselPred],
            feed_dict={image: Images, keep_prob: 1.0, VesselLabel: LabelsVessel})
        #------------------------Save predicted labels overlaid on images---------------------------------------------------------------------------------------------
        misc.imsave(Pred_Dir + "/OverLay/Vessel/" + FileName + NameEnd, Overlay.OverLayLiquidSolid(Images[0], Vessel[0], w))
        misc.imsave(Pred_Dir + "/Label/OnePhase/" + FileName + NameEnd, OnePhase[0])
        misc.imsave(Pred_Dir + "/OverLay/OnePhase/" + FileName + NameEnd,Overlay.OverLayFillLevel(Images[0], OnePhase[0], w))
        misc.imsave(Pred_Dir + "/Label/LiquiSolid/" + FileName + NameEnd, LiquidSolid[0])
        misc.imsave(Pred_Dir + "/OverLay/LiquiSolid/" + FileName + NameEnd,Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w))
        misc.imsave(Pred_Dir + "/Label/ExactPhase/" + FileName + NameEnd,  ExactPhase[0])
        misc.imsave(Pred_Dir + "/OverLay/ExactPhase/" + FileName + NameEnd,Overlay.OverLayExactPhase(Images[0], ExactPhase[0], w))
        misc.imsave(Pred_Dir + "/AllPredicitionsDisplayed/" + FileName+ NameEnd,np.concatenate((Images[0], Overlay.OverLayLiquidSolid(Images[0],Vessel[0],w),Overlay.OverLayFillLevel(Images[0], OnePhase[0], w),Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w), Overlay.OverLayExactPhase(Images[0], ExactPhase[0], w)), axis=1))
Example #7
################Class which builds the fully convolutional neural net###########################################################

import inspect
import os
import TensorflowUtils as utils
import numpy as np
import tensorflow as tf
import Inference as Inf
import cv2
import Data_Reader
from os import listdir
data_path = "/Users/anekisei/Documents/Spine_project_horizontal/train_images/"
TestReader = Data_Reader.Data_Reader(data_path, Train=True)
health = []
disease = []
correct_disease = 0
total_disease = 0
correct_risk = 0
total_risk = 0
correct_health = 0
total_health = 0

while TestReader.batchindex != 0:
    Images, GTLabels = TestReader.getBatch()
    result = Inf.predict(Images)
    total_disease += np.sum(GTLabels[:, 2])
    total_risk += np.sum(GTLabels[:, 1])
    total_health += np.sum(GTLabels[:, 0])

    for i in range(result.shape[0]):
        if result[i] == 2 and GTLabels[i, 2] == 1:
            correct_disease += 1  # the snippet is truncated here; this is the likely continuation
def main(argv=None):
    keep_prob = tf.placeholder(
        tf.float32, name="keep_probabilty")  # Dropout probability for training
    image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3], name="input_image"
    )  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels
    VesselLabel = tf.placeholder(
        tf.int32, shape=[None, None, None, 1], name="VesselLabel"
    )  # Label image for vessel background prediction use as ROI input mask for the net
    #-------------------------Build Net----------------------------------------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(
        vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(
        image, VesselLabel, NUM_CLASSES, keep_prob
    )  # Build net and load initial weights from VGG16 (weights before training)
    # -------------------------Data reader for validation image-----------------------------------------------------------------------------------------------------------------------------

    ValidReader = Data_Reader.Data_Reader(
        Image_Dir, Label_Dir, Batch_Size
    )  # build reader that will be used to load images and labels from validation set
    sess = tf.Session()  # Start Tensorflow session
    #--------Load trained model--------------------------------------------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir +
              ". See TRAIN.py for training,")
        print(
            "or download a pretrained model from"
            " https://drive.google.com/file/d/0B6njwynsu2hXRDMxWlBUTWFZM2c/view?usp=sharing "
            "and extract it in logs_dir")
        sys.exit()
#--------------------Sum of intersection from all validation images for all classes and sum of union for all images and all classes----------------------------------------------------------------------------------
    VesUn = np.float64(np.zeros(
        len(VesseClasses)))  # Sum of union for vessel region prediction
    VesIn = np.float64(np.zeros(len(
        VesseClasses)))  # Sum of intersection for vessel region prediction
    PhaseUn = np.float64(np.zeros(
        len(PhaseClasses)))  # Sum of union for fill-level prediction
    PhaseIn = np.float64(np.zeros(
        len(PhaseClasses)))  # Sum of intersection for fill-level prediction
    LiqSolUn = np.float64(np.zeros(
        len(LiquidSolidClasses)))  # Sum of union for liquid/solid prediction
    LiqSolIn = np.float64(np.zeros(len(
        LiquidSolidClasses)))  # Sum of intersection for liquid/solid prediction
    ExactPhaseUn = np.float64(np.zeros(len(ExactPhaseClasses)))
    ExactPhaseIn = np.float64(np.zeros(len(ExactPhaseClasses)))
    fim = 0
    print("Start Evaluating intersection over union for " +
          str(ValidReader.NumFiles) + " images")
    #===========================Go over all validation images and calculate IOU============================================================
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1

        #.........................................Run prediction/inference on validation................................................................................
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsExactPhase = ValidReader.ReadNextBatchClean(
        )  # Read images and ground truth annotation
        #Predict annotation using net
        ExactPhase, LiquidSolid, OnePhase, Vessel = sess.run(
            [Net.ExactPhasePred, Net.LiquidSolidPred, Net.PhasePred, Net.VesselPred],
            feed_dict={image: Images, keep_prob: 1.0, VesselLabel: LabelsVessel})
        #............................Calculate Intersection and union for prediction...............................................................

        #        print("-------------------------Vessel IOU----------------------------------------")
        CIOU, CU = IOU.GetIOU(Vessel, LabelsVessel.squeeze(),
                              len(VesseClasses), VesseClasses)
        VesIn += CIOU * CU
        VesUn += CU

        #        print("------------------------One Phase IOU----------------------------------------")
        CIOU, CU = IOU.GetIOU(OnePhase, LabelsOnePhase.squeeze(),
                              len(PhaseClasses), PhaseClasses)
        PhaseIn += CIOU * CU
        PhaseUn += CU

        #        print("--------------------------Liquid Solid IOU-----------------------------------")
        CIOU, CU = IOU.GetIOU(LiquidSolid, LabelsSolidLiquid.squeeze(),
                              len(LiquidSolidClasses), LiquidSolidClasses)
        LiqSolIn += CIOU * CU
        LiqSolUn += CU

        #        print("----------------------All Phases  Phase IOU----------------------------------------")
        CIOU, CU = IOU.GetIOU(ExactPhase, LabelsExactPhase.squeeze(),
                              len(ExactPhaseClasses), ExactPhaseClasses)
        ExactPhaseIn += CIOU * CU
        ExactPhaseUn += CU

#-----------------------------------------Print results--------------------------------------------------------------------------------------
    print("----------------------------------------------------------------------------------")
    print("---------------------------Mean Prediction----------------------------------------")
    print("---------------------IOU=Intersection Over Union----------------------------------")
    # ------------------------------------------------------------------------------------------------------------
    print("-------------------------Vessel IOU----------------------------------------")
    for i in range(len(VesseClasses)):
        if VesUn[i] > 0:
            print(VesseClasses[i] + "\t" + str(VesIn[i] / VesUn[i]))
    print("------------------------One Phase IOU----------------------------------------")
    for i in range(len(PhaseClasses)):
        if PhaseUn[i] > 0:
            print(PhaseClasses[i] + "\t" + str(PhaseIn[i] / PhaseUn[i]))
    print("--------------------------Liquid Solid IOU-----------------------------------")
    for i in range(len(LiquidSolidClasses)):
        if LiqSolUn[i] > 0:
            print(LiquidSolidClasses[i] + "\t" + str(LiqSolIn[i] / LiqSolUn[i]))
    print("----------------------All Phases IOU----------------------------------------")
    for i in range(len(ExactPhaseClasses)):
        if ExactPhaseUn[i] > 0:
            print(ExactPhaseClasses[i] + "\t" + str(ExactPhaseIn[i] / ExactPhaseUn[i]))
Net = NET_FCN.Net(  # the opening of this call is truncated in the source; reconstructed from the NET_FCN.Net usage later on this page
    NumClasses=NUM_CLASSES,
    PreTrainedModelPath=Pretrained_Encoder_Weights,
    UpdateEncoderBatchNormStatistics=UpdateEncoderBatchNormStatistics
)  # Create net and load pretrained encoder weights

if Trained_model_path != "":  # Optionally initialize the full net by loading a pretrained net
    Net.load_state_dict(torch.load(Trained_model_path))
#optimizer=torch.optim.SGD(params=Net.parameters(),lr=Learning_Rate,weight_decay=Weight_Decay,momentum=0.5)
optimizer = torch.optim.Adam(
    params=Net.parameters(), lr=Learning_Rate,
    weight_decay=Weight_Decay)  # Create adam optimizer

#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------

TrainReader = Data_Reader.Data_Reader(
    ImageDir=Train_Image_Dir, GTLabelDir=Train_Label_Dir,
    BatchSize=Batch_Size)  #Reader for training data
if UseValidationSet:
    ValidReader = Data_Reader.Data_Reader(
        ImageDir=Valid_Image_Dir,
        GTLabelDir=Valid_Label_Dir,
        BatchSize=Batch_Size)  # Reader for validation data

#--------------------------- Create logs files for saving loss during training----------------------------------------------------------------------------------------------------------
f = open(TrainLossTxtFile, "w")  # Training loss log file
f.write("Iteration\tloss\t Learning Rate=" + str(Learning_Rate))
f.close()
if UseValidationSet:
    f = open(ValidLossTxtFile, "w")  #Validation  loss log file
    f.write("Iteration\tloss \t  AvgLoss \t Learning Rate=" +
            str(Learning_Rate))
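This snippet sets up the optimizer, data readers, and loss logs but stops before the training loop itself. A minimal sketch of what one iteration presumably looks like, assuming Net maps an NCHW float tensor to per-pixel class logits and that ReadNextBatchClean() returns numpy image/label batches as in the other examples on this page (MAX_ITERATION and the per-pixel cross-entropy loss are assumptions, not code from this repository):

import numpy as np
import torch
import torch.nn.functional as F

MAX_ITERATION = 100000  # illustrative
for itr in range(MAX_ITERATION):
    Images, GTLabels = TrainReader.ReadNextBatchClean()               # numpy: NHWC images, NHW1 labels (assumed)
    images = torch.from_numpy(Images.astype(np.float32)).permute(0, 3, 1, 2)  # NHWC -> NCHW
    labels = torch.from_numpy(GTLabels.astype(np.int64)).squeeze(-1)  # NHW integer class ids
    logits = Net(images)                                              # assumed NCHW per-pixel logits
    loss = F.cross_entropy(logits, labels)                            # per-pixel softmax cross-entropy
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if itr % 100 == 0:
        with open(TrainLossTxtFile, "a") as f:                        # append to the log created above
            f.write("\n" + str(itr) + "\t" + str(loss.item()))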
Example #10
def main():
    # Placeholders for input image and labels
    keep_prob = tf.placeholder(tf.float32,
                               name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3], name="input_image"
    )  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels

    # Build the neural network
    Net = BuildNetVgg16.BUILD_NET_VGG16(
        vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob
              )  # Build net and load initial weights (weights before training)

    # Data reader for validation/testing images
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)

    # Start Tensorflow session
    sess = tf.Session()
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())

    # Load model from checkpoint
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir +
              ". See Train.py for creating the training network.")
        sys.exit()

    # Create output directories for predicted labels, one folder for each granularity of label prediction
    if not os.path.exists(Pred_Dir): os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    print("Running Predictions:")
    print("Saving output to:" + Pred_Dir)

    # Iterate through images and predict semantic segmentation for test set
    print("Start Predicting " + str(ValidReader.NumFiles) + " images")
    fim = 0
    while (ValidReader.itr < ValidReader.NumFiles):

        # Load image
        FileName = ValidReader.OrderedFiles[
            ValidReader.itr]  #Get input image name
        Images = ValidReader.ReadNextBatchClean()  # load testing image

        # Predict annotation using neural net
        LabelPred = sess.run(Net.Pred,
                             feed_dict={
                                 image: Images,
                                 keep_prob: 1.0
                             })

        # Save predicted labels overlay on images
        misc.imsave(Pred_Dir + "/OverLay/" + FileName,
                    Overlay.OverLayLabelOnImage(Images[0], LabelPred[0],
                                                w))  #Overlay label on image
        misc.imsave(Pred_Dir + "/Label/" + FileName[:-4] + ".png",
                    LabelPred[0].astype(np.uint8))
        #np.save(Pred_Dir + "/Probs/" + FileName[:-4] + ".npy", probs)

        fim += 1
        print("{:2.2f}%".format(fim * 100.0 / ValidReader.NumFiles))

    # Iterate through images and predict semantic segmentation for validation set
    if not os.path.exists(Valid_Pred_Dir + "/OverLay"):
        os.makedirs(Valid_Pred_Dir + "/OverLay")
    if not os.path.exists(Valid_Pred_Dir + "/Probs"):
        os.makedirs(Valid_Pred_Dir + "/Probs")
    if not os.path.exists(Valid_Pred_Dir + "/Label"):
        os.makedirs(Valid_Pred_Dir + "/Label")
    print("Validating on " + str(ValidReader.NumFiles) + " images")
    ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir,
                                          GTLabelDir=Valid_Labels_Dir,
                                          BatchSize=1)
    roc = ROC(NUM_CLASSES)
    fim = 0
    while (ValidReader.itr < ValidReader.NumFiles):

        # Load image
        FileName = ValidReader.OrderedFiles[
            ValidReader.itr]  # Get input image name
        Images, GTLabels = ValidReader.ReadNextBatchClean(
        )  # load validation image and ground truth labels

        # Predict annotation using neural net
        LabelPred = sess.run(Net.Pred,
                             feed_dict={
                                 image: Images,
                                 keep_prob: 1.0
                             })

        # Get probabilities
        LabelProb = sess.run(Net.Prob,
                             feed_dict={
                                 image: Images,
                                 keep_prob: 1.0
                             })
        # The original opened a new InteractiveSession here on every iteration, which leaks
        # resources; a plain numpy softmax over the logits avoids that.
        e = np.exp(LabelProb - np.max(LabelProb, axis=-1, keepdims=True))
        probs = np.squeeze(e / np.sum(e, axis=-1, keepdims=True))

        # Import data to ROC object
        roc.add_data(np.squeeze(GTLabels), probs, np.squeeze(LabelPred))

        # Save predicted labels overlay on images
        misc.imsave(Valid_Pred_Dir + "/OverLay/" + FileName,
                    Overlay.OverLayLabelOnImage(Images[0], LabelPred[0],
                                                w))  #Overlay label on image
        misc.imsave(Valid_Pred_Dir + "/Label/" + FileName[:-4] + ".png",
                    LabelPred[0].astype(np.uint8))
        np.save(Valid_Pred_Dir + "/Probs/" + FileName[:-4] + ".npy", probs)

        fim += 1
        print("{:2.2f}%".format(fim * 100.0 / ValidReader.NumFiles))

        #import pdb; pdb.set_trace()


    # Compute accuracy, precision, recall, and f-1 score
    acc = roc.accuracy()
    print(roc.report)
    print("Total Accuracy: {:3.2f}".format(acc))
w = 0.5  # weight of the overlay on the image for display
Output_Dir = "Output_Prediction/"  # Folder where the output prediction will be saved
NameEnd = ""  # Optional string appended to the end of the file name
NUM_CLASSES = 3  # Number of classes
UpdateNormBatchStatisics = False  # Calculate batch-norm statistics on the fly, or use the training statistics
#-----------------------------Create net and load weight--------------------------------------------------------------------------------------------
Net = NET_FCN.Net(NumClasses=NUM_CLASSES)  #Build Net
Net.load_state_dict(torch.load(Trained_model_path))  # Load trained model
if not UpdateNormBatchStatisics:
    Net.eval()  # Don't update batch-normalization statistics
print("Model weights loaded from: " + Trained_model_path)
################################################################################################################################################################################

# -------------------------Data reader for  images-----------------------------------------------------------------------------------------------------------------------------
Reader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)

#--------------------Create output folders for predicted labels, one folder for each granularity of label prediction---------------------------------------------------------------------------------------------------------------------------------------------

if not os.path.exists(Output_Dir): os.makedirs(Output_Dir)
if not os.path.exists(Output_Dir + "/OverLay"):
    os.makedirs(Output_Dir + "/OverLay")
if not os.path.exists(Output_Dir + "/Label"):
    os.makedirs(Output_Dir + "/Label")

print("Running Predictions:")
print("Saving output to:" + Output_Dir)
#----------------------Go over all images and predict semantic segmentation at various class granularities-------------------------------------------------------------
fim = 0
print("Start Predicting " + str(Reader.NumFiles) + " images")
while (Reader.itr < Reader.NumFiles):
Example #12
    def inference(self,
                  test_path='/data/put_data/cclin/ntu/dlcv2018/hw3/validation',
                  gen_from=None,
                  gen_from_ckpt=None,
                  out_path=None,
                  bsize=2,
                  out_img_size=512):
        ### create output folder
        if gen_from is None:
            gen_from = os.path.join(self.result_path, self.model_name,
                                    'models')
        if out_path is not None:
            if os.path.exists(out_path):
                print('WARNING: the output path "{}" already exists!'.format(
                    out_path))
            else:
                os.makedirs(out_path)
        else:
            out_path = os.path.join(gen_from, 'outputs')

        ### load previous model if possible
        could_load, checkpoint_counter = self.load(gen_from, gen_from_ckpt)
        if could_load:
            print(" [*] Load SUCCESS")
            #### GTLabelDir='' to read image and mask file names ('xxxx_mask.png')
            data_gen_test = Data_Reader.Data_Reader(test_path,
                                                    GTLabelDir='',
                                                    BatchSize=bsize,
                                                    img_size=self.img_size)
            nBatches = int(np.ceil(data_gen_test.NumFiles / data_gen_test.BatchSize))  # np.int is deprecated in modern numpy
            enlarge_factor = int((out_img_size / self.img_size) * 8)  # integer repeat count for np.repeat
            # print('enlarge_factor = %d' % enlarge_factor)
            for i in tqdm.tqdm(range(nBatches)):
                batch_image, batch_mask_path = data_gen_test.ReadNextBatchClean(
                )
                labels_pred = self.sess.run(
                    self.label_predict,
                    feed_dict={
                        self.image: batch_image,
                        # self.keep_prob: 1.0,
                        self.bn_train: False
                    })
                ##### debug: also save smaller masks (e.g., 256)
                # for j in range(len(batch_mask_path)):
                #     mask = self.label_to_rgb(labels_pred[j])
                #     if not os.path.exists(os.path.join(out_folder_path, 'small')):
                #         os.makedirs(os.path.join(out_folder_path, 'small'))
                #     skimage.io.imsave(os.path.join(out_folder_path, 'small', batch_mask_path[j][0:-8] + 'mask_.png'), mask)

                ##### resize labels_pred into out_img_size (e.g., 256 --> 512)
                labels_pred = np.repeat(
                    np.repeat(labels_pred, enlarge_factor, axis=1),
                    enlarge_factor, axis=2)
                for j in range(len(batch_mask_path)):
                    mask = self.label_to_rgb(labels_pred[j])
                    skimage.io.imsave(
                        os.path.join(out_path, batch_mask_path[j]), mask)
        else:
            print(" [*] Failed to find a checkpoint")
Example #13
    def train(
            self,
            init_from=None,
            train_path='/data/put_data/cclin/ntu/dlcv2018/hw3/train',
            valid_path='/data/put_data/cclin/ntu/dlcv2018/hw3/validation',
            nEpochs=10,
            bsize=2,
            # keep_prob_train=0.5,
            learning_rate_start=1e-5,
            patience=2):
        ### create a dedicated folder for this model
        if os.path.exists(os.path.join(self.result_path, self.model_name)):
            print('WARNING: the folder "{}" already exists!'.format(
                os.path.join(self.result_path, self.model_name)))
        else:
            os.makedirs(os.path.join(self.result_path, self.model_name))
            os.makedirs(
                os.path.join(self.result_path, self.model_name, 'outputs'))
            os.makedirs(
                os.path.join(self.result_path, self.model_name, 'models'))
        ### create logger
        log_path_train = os.path.join(self.result_path, self.model_name,
                                      'training_loss.txt')
        f = open(log_path_train, "w")
        f.write("epoch\tloss\tlearning rate\n")
        f.close()
        if valid_path is not None:
            log_path_valid = os.path.join(self.result_path, self.model_name,
                                          'validation_loss.txt')
            f = open(log_path_valid, "w")
            f.write("epoch\tloss\tlearning rate\n")
            f.close()
        ### data generator
        data_gen_train = Data_Reader.Data_Reader(train_path,
                                                 GTLabelDir=train_path,
                                                 BatchSize=bsize,
                                                 img_size=self.img_size)
        if valid_path is not None:
            data_gen_valid = Data_Reader.Data_Reader(valid_path,
                                                     GTLabelDir=valid_path,
                                                     BatchSize=bsize,
                                                     img_size=self.img_size)

        ### initialization
        initOp = tf.global_variables_initializer()
        self.sess.run(initOp)

        ### load previous model if possible
        train_from_scratch = True
        epoch_counter = 0
        if init_from is not None:
            could_load, checkpoint_counter = self.load(init_from)
            if could_load:
                epoch_counter = checkpoint_counter
                train_from_scratch = False
                print(" [*] Load SUCCESS")
            else:
                print(" [!] Load failed...")
        else:
            print(" [@] train from scratch")

        ### the main loop
        learning_rate_used = learning_rate_start
        best_loss = 0
        stopping_step = 0
        global_patience_count = 0
        for epoch in range(nEpochs):
            loss_temp = []
            nBatches = int(np.ceil(data_gen_train.NumFiles / data_gen_train.BatchSize))  # np.int is deprecated in modern numpy
            # print("Training on " + str(data_gen_train.NumFiles) + " images")
            for i in tqdm.tqdm(range(nBatches)):
                batch_image, batch_label_true, _ = data_gen_train.ReadNextBatchClean(
                )
                batch_label_true_small = np.zeros(
                    [batch_label_true.shape[0], self.img_size // 8,
                     self.img_size // 8, 1], dtype=int)
                for f in range(batch_label_true.shape[0]):
                    batch_label_true_small[f, :, :, 0] = skimage.transform.resize(
                        batch_label_true[f, :, :, 0],
                        [self.img_size // 8, self.img_size // 8],
                        mode='reflect', preserve_range=True).astype(int)
                _, TLoss = self.sess.run(
                    [self.train_op, self.loss],
                    feed_dict={
                        self.image: batch_image,
                        self.label_true: batch_label_true_small,
                        # self.keep_prob: keep_prob_train,
                        self.learning_rate: learning_rate_used,
                        self.bn_train: True
                    })
                loss_temp.append(TLoss)
            #### counter for file names of saved models
            epoch_counter += 1
            #### record training loss for every epoch
            buf = str(epoch_counter) + '\t' + str(
                np.mean(loss_temp)) + '\t' + str(learning_rate_used)
            self.print2file(buf, log_path_train)
            #### validation
            if valid_path is not None:
                if epoch_counter == 1 or epoch_counter % 10 == 0 or (
                        epoch_counter > 60
                        and best_loss == np.mean(loss_temp)):
                    out_path = os.path.join(self.result_path, self.model_name,
                                            'out_' + str(epoch_counter))
                    os.makedirs(out_path)
                loss_temp = []
                nBatches = int(np.ceil(data_gen_valid.NumFiles / data_gen_valid.BatchSize))
                # print("Calculating validation on " + str(data_gen_valid.NumFiles) + " images")
                for i in tqdm.tqdm(range(nBatches)):
                    batch_image, batch_label_true, batch_mask_path = data_gen_valid.ReadNextBatchClean(
                    )
                    batch_label_true_small = np.zeros(
                        [batch_label_true.shape[0], self.img_size // 8,
                         self.img_size // 8, 1], dtype=int)
                    for f in range(batch_label_true.shape[0]):
                        batch_label_true_small[f, :, :, 0] = skimage.transform.resize(
                            batch_label_true[f, :, :, 0],
                            [self.img_size // 8, self.img_size // 8],
                            mode='reflect', preserve_range=True).astype(int)
                    VLoss = self.sess.run(
                        self.loss,
                        feed_dict={
                            self.image: batch_image,
                            self.label_true: batch_label_true_small,
                            # self.keep_prob: 1.0,
                            self.bn_train: False
                        })
                    loss_temp.append(VLoss)
                    #### run inference for every 10 epochs
                    if epoch_counter == 1 or epoch_counter % 10 == 0 or (
                            epoch_counter > 60
                            and best_loss == np.mean(loss_temp)):
                        labels_pred = self.sess.run(
                            self.label_predict,
                            feed_dict={
                                self.image: batch_image,
                                # self.keep_prob: 1.0,
                                self.bn_train: False
                            })
                        ##### resize labels_pred into out_img_size (e.g., 256 --> 512)
                        labels_pred = np.repeat(
                            np.repeat(labels_pred, 16, axis=1), 16, axis=2)
                        for j in range(len(batch_mask_path)):
                            mask = self.label_to_rgb(labels_pred[j])
                            skimage.io.imsave(
                                os.path.join(out_path, batch_mask_path[j]),
                                mask)
                buf = str(epoch_counter) + '\t' + str(
                    np.mean(loss_temp)) + '\t' + str(learning_rate_used)
                self.print2file(buf, log_path_valid)
                print(
                    'epoch_counter: %d, np.mean(loss_temp) = %f, best_loss = %f, stopping_step = %d'
                    % (epoch_counter, np.mean(loss_temp), best_loss,
                       stopping_step))
                #### update learning rate if necessary
                if epoch == 0:
                    best_loss = np.mean(loss_temp)
                else:
                    if (best_loss - np.mean(loss_temp)) > 0.0001:
                        best_loss = np.mean(loss_temp)
                        stopping_step = 0
                        ##### save model whenever improvement
                        save_path = self.saver.save(
                            self.sess,
                            os.path.join(self.result_path, self.model_name,
                                         'models', self.model_name + '.model'),
                            global_step=epoch_counter)
                    else:
                        stopping_step += 1
                    if stopping_step >= patience:
                        global_patience_count += 1
                        if global_patience_count < 3:
                            print("================================================")
                            print("Update learning rate from %f to %f" %
                                  (learning_rate_used, learning_rate_used / 2))
                            print("================================================")
                            stopping_step = 0
                            learning_rate_used = learning_rate_used / 2
                        else:
                            print("================================================")
                            print("Updated learning rate %d times, stopping training"
                                  % global_patience_count)
                            print("================================================")
                            break
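The validation branch above implements a halve-on-plateau schedule: the learning rate is halved whenever `patience` epochs pass without the validation loss improving by at least 0.0001, and training stops once that has happened three times. The same policy isolated as a small helper (a sketch; the class and its names are illustrative, not part of the repository):

class PlateauPolicy:
    # Halve the LR after `patience` epochs without improvement; stop after `max_drops` plateaus
    def __init__(self, lr, patience=2, min_delta=1e-4, max_drops=3):
        self.lr, self.patience, self.min_delta, self.max_drops = lr, patience, min_delta, max_drops
        self.best = None
        self.bad_epochs = 0
        self.drops = 0

    def step(self, val_loss):
        # Returns False when training should stop
        if self.best is None or self.best - val_loss > self.min_delta:
            self.best = val_loss
            self.bad_epochs = 0
            return True
        self.bad_epochs += 1
        if self.bad_epochs >= self.patience:
            self.drops += 1
            if self.drops >= self.max_drops:
                return False      # plateau hit too many times: stop
            self.lr /= 2          # halve the learning rate and keep going
            self.bad_epochs = 0
        return True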
    def open_file(self):
        print("open_file")
        self.kurlrequester.setStartDir("../input")
        url = str(self.kurlrequester.text())  # convert QString to a Python string
        reader = Data_Reader(url)
        self.add_to_data_list(reader)
Example #15
def main(argv=None):
    # .........................Placeholders for input image and labels........................................................................
    keep_prob = tf.placeholder(tf.float32,
                               name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(
        tf.float32, shape=[None, None, None, 3], name="input_image"
    )  # Input image batch: dim 0 image index, dim 1 height, dim 2 width, dim 3 RGB channels

    # -------------------------Build Net----------------------------------------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(
        vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob
              )  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images-----------------------------------------------------------------------------------------------------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)
    # print(ValidReader)

    # exit()
    #-------------------------Load trained model; if you don't have a trained model, see Train.py-----------------------------------------------------------------------------------------------------------------------------

    sess = tf.Session()  #Start Tensorflow session

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # if a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: no trained model in " + logs_dir +
              ". See Train.py for creating the training network.")
        sys.exit()


#--------------------Create output directories for predicted labels, one folder for each granularity of label prediction---------------------------------------------------------------------------------------------------------------------------------------------

    if not os.path.exists(Pred_Dir): os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")

    print("Running Predictions:")
    print("Saving output to:" + path_XML)
    #----------------------Go over all images and predict semantic segmentation at various class granularities-------------------------------------------------------------
    fim = 0
    print("Start Predicting " + str(ValidReader.NumFiles) + " images")

    startTime = time.time()
    while (ValidReader.itr < ValidReader.NumFiles):

        # ..................................Load image.......................................................................................
        FileName = ValidReader.OrderedFiles[
            ValidReader.itr]  #Get input image name
        Images = ValidReader.ReadNextBatchClean()  # load testing image

        # Predict annotation using net
        LabelPred = sess.run(Net.Pred,
                             feed_dict={
                                 image: Images,
                                 keep_prob: 1.0
                             })
        #------------------------Save predicted labels overlaid on images---------------------------------------------------------------------------------------------
        endTimePredict = time.time()
        print('\n\nTime predict image', FileName, ' : ',
              endTimePredict - startTime)
        ImageResult = Images[0].copy()
        LabelResult = LabelPred[0].copy()

        # print('Label shape :  ',LabelResult.shape)
        # print('Images shape:  ',ImageResult.shape)
        LabelResult = LabelResult.astype(np.uint8)

        # print('width_ORG,height_ORG', width_ORG,height_ORG)
        # print('Images shape after resize:  ',ImageResult.shape)

        imgORG = cv2.imread(Image_Read + FileName)
        height_ORG, width_ORG, _ = imgORG.shape
        print('imgORG shape ', imgORG.shape)
        print('ListSize : ', ListSize[fim][1], '       ', ListSize[fim][0])

        ImageResult = cv2.resize(ImageResult, (width_ORG, height_ORG))
        # LabelResult = cv2.resize(LabelResult, (ListSize[fim][1], ListSize[fim][0]))
        # Use nearest-neighbor interpolation for the label map so class ids are not blended by the resize
        LabelResult = cv2.resize(LabelResult, (width_ORG, height_ORG),
                                 interpolation=cv2.INTER_NEAREST)

        # file_mask=path_mask+FileName
        # cv2.imwrite(file_mask,LabelResult)
        # ImageResult = cv2.resize(ImageResult, (height_ORG, width_ORG))
        # LabelResult = cv2.resize(LabelResult, (height_ORG, width_ORG))
        # print('min LabelResult',LabelResult.min())
        # print('max LabelResult',LabelResult.max())

        CreateXML.SaveXML(LabelResult, FileName, path_XML)
        print('Time CreateXML image', FileName, ' : ',
              time.time() - endTimePredict)
        startTime = time.time()
        fim += 1
        print('Processing : ', str(fim * 100.0 / ValidReader.NumFiles) + "%")
Example #16
def main(argv=None):
    tf.reset_default_graph()
    keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
    image = tf.placeholder(tf.float32,
                           shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                           name="input_image")
    annotation = tf.placeholder(tf.int32,
                                shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1],
                                name="annotation")

    pred_annotation, logits = inference(image, keep_probability)
    loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits,
        labels=tf.squeeze(annotation, axis=[3]),  # squeeze_dims is deprecated; axis is the current name
        name="entropy")))

    loss_summary = tf.summary.scalar("entropy", loss)

    trainable_var = tf.trainable_variables()
    train_op = train(loss, trainable_var)
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir,
                                          GTLabelDir=Train_Label_Dir)
    ValReader = Data_Reader.Data_Reader(Val_Image_Dir,
                                        GTLabelDir_Val=Val_Label_Dir)

    sess = tf.Session()

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    train_loss_mean = np.array([])  # running record of per-step training losses
    epoch = 0

    for itr in range(MAX_ITERATION):
        train_images, train_annotations = TrainReader.ReadNextBatchClean()
        feed_dict = {
            image: train_images,
            annotation: train_annotations,
            keep_probability: 0.85
        }

        sess.run(train_op, feed_dict=feed_dict)

        train_loss, summary_str = sess.run([loss, loss_summary],
                                           feed_dict=feed_dict)
        train_loss_mean = np.append(train_loss_mean, train_loss)
        if itr % image_count == 0 and itr > 0:
            print("Saving Model to file in " + logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr)
        if itr % image_count == 0:
            epoch = epoch + 1
            print("epoch: %d, Train_loss:%g" %
                  (epoch, np.mean(train_loss_mean)))

        if itr % 500 == 0:
            valid_images, valid_annotations = ValReader.ReadNextBatchClean()
            valid_loss = sess.run(loss,
                                  feed_dict={
                                      image: valid_images,
                                      annotation: valid_annotations,
                                      keep_probability: 1.0
                                  })
            print("Validation_loss: %g" % (valid_loss))
Example #17
# sys.path.append('D:\\code\\Python\\Experiment\\newNetwork\\new\\yszxx\\yszxx')
'''自建包'''
import _Base_ as base
import Data_Reader
import Models
import Optimizer
import Adversary
'''超参数'''
param = {
    'test_batch_size': 100,
    'epsilon': 0.2,
}
'''Load test data'''
test_dataset = Data_Reader.Mnist.Mnist_dataset().get_test_dataset()
loader_test = Data_Reader.get_dataloader(dataset=test_dataset,
                                         batch_size=param['test_batch_size'],
                                         shuffle=False)
# 10000-1
'''Load the model'''
net = Models.Lenet5.Lenet5()
net = Models.load_state_dict(net, './lenet5_dict.pkl')
num_correct, num_samples, acc = Optimizer.test(net, loader_test)
print('[Start] right predict:(%d/%d) ,pre test_acc=%.4f' %
      (num_correct, num_samples, acc))
# right predict:(936/10000) ,pre test_acc=9.3600
'''模型测试过程'''
base.enable_cuda(net)  # use CUDA
net.eval()  # inference mode
for p in net.parameters():  # freeze the model: gradients are not needed for its parameters
    p.requires_grad = False
Optimizer.test(net, loader_test)  # test performance on clean samples
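
The `epsilon` hyperparameter and the `Adversary` import suggest an FGSM-style attack. Since the module's API is not shown in this snippet, the following is only a minimal plain-PyTorch sketch; `fgsm_attack`, `model`, `x`, and `y` are illustrative names, not the repository's.

import torch
import torch.nn.functional as F

def fgsm_attack(model, x, y, epsilon=0.2):
    # Fast Gradient Sign Method (hypothetical sketch): one signed-gradient step away from the true label
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x_adv), y)
    loss.backward()
    x_adv = x_adv + epsilon * x_adv.grad.sign()
    return x_adv.clamp(0.0, 1.0).detach()  # keep pixels in the valid [0, 1] range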
Example #18
def main(argv=None):
    tf.reset_default_graph()
    keep_prob= tf.placeholder(tf.float32, name="keep_probabilty") #Dropout probability
#.........................Placeholders for input image and labels...........................................................................................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: [image index, height, width, RGB channels]
    VesselLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="VesselLabel")  # Label image for vessel/background prediction
    PhaseLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="PhaseLabel")  # Label image for filled-vessel/empty-vessel/background prediction
    LiquidSolidLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="LiquidSolidLabel")  # Label image for liquid/solid/vessel/background prediction
    AllPhasesLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="AllPhasesLabel")  # Label image for fine-grained phase prediction (liquid, solid, powder, foam, ...)
#.........................Build FCN Net...............................................................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
#......................................Get loss functions for the neural network, one loss function for each set of labels....................................................................................................
    VesselLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(VesselLabel, squeeze_dims=[3]), logits=Net.VesselProb, name="VesselLoss")))  # Loss for vessel/background labels
    PhaseLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(PhaseLabel, squeeze_dims=[3]), logits=Net.PhaseProb, name="PhaseLoss")))  # Loss for filled/empty labels
    LiquidSolidLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(LiquidSolidLabel, squeeze_dims=[3]), logits=Net.LiquidSolidProb, name="LiquidSolidLoss")))  # Loss for liquid/solid labels
    AllPhasesLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(AllPhasesLabel, squeeze_dims=[3]), logits=Net.AllPhasesProb, name="AllPhasesLoss")))  # Loss for fine-grained phase labels
    WeightDecayLoss = Net.SumWeights * Weight_Loss_Rate  # Weight decay (L2) loss
    TotalLoss = VesselLoss + PhaseLoss + LiquidSolidLoss + AllPhasesLoss + WeightDecayLoss  # Total loss is the sum of the losses for all label sets
#....................................Create solver for the net............................................................................................
    trainable_var = tf.trainable_variables()
    train_op = train(TotalLoss, trainable_var)
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, Label_Dir,Batch_Size) #Reader for training data
    ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, Label_Dir, Batch_Size) # Reader for validation data
    sess = tf.Session() #Start Tensorflow session
# -------------load trained model if exist-----------------------------------------------------------------

    print("Setting up Saver...")
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if train model exist restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")

#---------------------------Start Training: Create loss files for saving the loss during training ----------------------------------------------------------------------------------------------------------

    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tTotal_Loss\tVessel_Loss\tOne_Phase_Loss\tLiquid_Solid_Loss\tAll_Phases_Loss\tLearning Rate=" + str(learning_rate))
    f.close()
    f = open(ValidLossTxtFile, "w")
    f.write("Iteration\tTotal_Loss\tVessel_Loss\tOne_Phase_Loss\tLiquid_Solid_Loss\tAll_Phases_Loss\tLearning Rate=" + str(learning_rate))
    f.close()
#..............Start Training loop: Main Training....................................................................
    for itr in range(MAX_ITERATION):
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsAllPhases = TrainReader.ReadAndAugmentNextBatch()  # Load augmented images and ground-truth labels for training
        feed_dict = {image: Images,VesselLabel:LabelsVessel, PhaseLabel: LabelsOnePhase,LiquidSolidLabel:LabelsSolidLiquid,AllPhasesLabel:LabelsAllPhases, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict) # Train one cycle
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
        if itr % 20000 == 0: saver.save(sess, logs_dir + "model.ckpt", itr)
#......................Write and display train loss..........................................................................
        if itr % 10==0:
            # Calculate train loss
            Tot_Loss,Ves_Loss,Phase_Loss,LiquidSolid_Loss,AllPhase_Loss= sess.run([TotalLoss,VesselLoss, PhaseLoss,LiquidSolidLoss,AllPhasesLoss], feed_dict=feed_dict)
            print("Step: %d,  Total_loss:%g,  Vessel_Loss:%g,  OnePhases_Loss:%g,  LiquidSolid_Loss:%g,  AllPhases_Loss:%g," % (itr, Tot_Loss,Ves_Loss,Phase_Loss,LiquidSolid_Loss,AllPhase_Loss))
            #Write train loss to file
            with open(TrainLossTxtFile, "a") as f:
                f.write("\n"+str(itr)+"\t"+str(Tot_Loss)+"\t"+str(Ves_Loss)+"\t"+str(Phase_Loss)+"\t"+str(LiquidSolid_Loss)+"\t"+str(AllPhase_Loss))

#......................Write and display Validation Set Loss by running loss on all validation images.....................................................................
        if itr % 500 == 0:

            SumTotalLoss=np.float64(0.0)
            SumVesselLoss = np.float64(0.0)
            SumOnePhassLoss = np.float64(0.0)
            SumLiquidSolidLoss= np.float64(0.0)
            SumAllPhase_Loss= np.float64(0.0)

            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            for i in range(NBatches):  # Go over all validation images
                Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsAllPhases = ValidReader.ReadNextBatchClean()  # load validation images and ground-truth labels
                feed_dict = {image: Images, VesselLabel: LabelsVessel, PhaseLabel: LabelsOnePhase,LiquidSolidLabel: LabelsSolidLiquid, AllPhasesLabel: LabelsAllPhases,keep_prob: 1}
                # Calculate loss for all labels set
                Tot_Loss, Ves_Loss, Phase_Loss, LiquidSolid_Loss, AllPhase_Loss = sess.run([TotalLoss, VesselLoss, PhaseLoss, LiquidSolidLoss, AllPhasesLoss], feed_dict=feed_dict)

                SumTotalLoss+=Tot_Loss
                SumVesselLoss+=Ves_Loss
                SumOnePhassLoss+=Phase_Loss
                SumLiquidSolidLoss+=LiquidSolid_Loss
                SumAllPhase_Loss+=AllPhase_Loss

            SumTotalLoss/=NBatches
            SumVesselLoss /= NBatches
            SumOnePhassLoss /= NBatches
            SumLiquidSolidLoss/=NBatches
            SumAllPhase_Loss/= NBatches
            print("Validation Total_loss:%g,  Vessel_Loss:%g,  OnePhases_Loss:%g,  LiquidSolid_Loss:%g,  AllPhases_Loss:%g," % (SumTotalLoss, SumVesselLoss, SumOnePhassLoss, SumLiquidSolidLoss, SumAllPhase_Loss))
            with open(ValidLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(SumTotalLoss) + "\t" + str(SumVesselLoss) + "\t" + str(SumOnePhassLoss) + "\t" + str(SumLiquidSolidLoss) + "\t" + str(SumAllPhase_Loss))
Example #19
def main(argv=None):
	tf.reset_default_graph()
	keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")	# Dropout probability

	# Placeholders for input image and labels
	image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")
	GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="GTLabel")

  	# Build FCN Network
	Net =  BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path) # Create class for the network
	Net.build(image, NUM_CLASSES, keep_prob) # Create the net and load intial weights

	# Get loss function for the neural network (one loss per set of labels)
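	# One-hot labels times log-softmax, summed over all pixels: an unnormalized cross-entropy; the 1e-12 guards against log(0)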
	Loss = -tf.reduce_sum(tf.multiply(tf.to_float(tf.one_hot(tf.squeeze(GTLabel, squeeze_dims=[3]), NUM_CLASSES)), tf.log(tf.nn.softmax(Net.Prob) + 1e-12)))

	# Create solver for the net
	trainable_var = tf.trainable_variables() # Collect all trainable variables for the net
	train_op = train(Loss, trainable_var) # Create the train operation for the net

	# Create reader for training data
	TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, \
				GTLabelDir=Train_Label_Dir, BatchSize=Batch_Size)
    
	# Create reader for validation data
	if UseValidationSet:
		ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, \
				GTLabelDir=Valid_Labels_Dir, BatchSize=Validation_Batch_Size) 

	# Start TensorFlow session
	sess = tf.Session() 
	print("Setting up Saver...")
	saver = tf.train.Saver()
	sess.run(tf.global_variables_initializer()) # Initialize variables
	ckpt = tf.train.get_checkpoint_state(logs_dir)
	if ckpt and ckpt.model_checkpoint_path: # Restore trained model, if it exists
		saver.restore(sess, ckpt.model_checkpoint_path)
		print("Model restored...")

	# Create files for logging progress
	f = open(TrainLossTxtFile, "w")
	f.write("Training Loss\n")
	f.write("Learning_rate\t" + str(learning_rate) + "\n")
	f.write("Batch_size\t" + str(Batch_Size) + "\n")
	f.write("Itr\tLoss")
	f.close()
	if UseValidationSet:
		f = open(ValidRecTxtFile, "w")
		f.write("Validation Record\n")
		f.write("Learning_rate\t" + str(learning_rate) + "\n")
		f.write("Batch_size\t" + str(Batch_Size) + "\n")
		f.write("Itr\tLoss\tAccuracy")
		f.close()

	# Start Training loop: Main Training
	for itr in range(MAX_ITERATION):
		if UseStochasticity:
			Images, GTLabels = TrainReader.ReadAndAugmentNextBatch() # Load images and labels
		else:
			Images, GTLabels = TrainReader.ReadNextBatchClean()
		feed_dict = {image: Images,GTLabel:GTLabels, keep_prob: 0.5}
		sess.run(train_op, feed_dict=feed_dict) # Train one cycle
		
		# Save trained model
		if itr % 500==0 and itr>0:
			print("Saving Model to file in " + logs_dir)
			saver.save(sess, os.path.join(logs_dir, "model.ckpt"), itr) # Save model
		
		# Write and display train loss
		if itr % 1==0:
			# Calculate train loss
			feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
			TLoss=sess.run(Loss, feed_dict=feed_dict)
			print("Step " + str(itr) + " Train Loss=" + str(TLoss))
			# Write train loss to file
			with open(TrainLossTxtFile, "a") as f:
				f.write("\n"+str(itr)+"\t"+str(TLoss))

		# Write and display Validation Set Loss 
		if UseValidationSet and itr % 25 == 0:
			SumAcc = np.float64(0.0)
			SumLoss = np.float64(0.0)
			NBatches = int(np.ceil(ValidReader.NumFiles/ValidReader.BatchSize))
			print("Calculating Validation on " + str(ValidReader.NumFiles) + " Images")
			for i in range(NBatches): 
				Images, GTLabels= ValidReader.ReadNextBatchClean() # load validation data
				feed_dict = {image: Images,GTLabel: GTLabels, keep_prob: 1.0}
				
				# Calculate loss for all labels set
				TLoss = sess.run(Loss, feed_dict=feed_dict)
				SumLoss += TLoss

				# Compute validation accuracy
				pred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
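				# accuracy_score on the flattened label maps gives overall per-pixel accuracy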
				acc = accuracy_score(np.squeeze(GTLabels).ravel(), np.squeeze(pred).ravel())
				SumAcc += acc

			# Print validation status to console
			print("Epoch: " + str(TrainReader.Epoch))

			SumAcc/=NBatches
			SumLoss/=NBatches
			print("Validation Loss: " + str(SumLoss))
			print("Validation Accuracy: " + str(SumAcc))
			with open(ValidRecTxtFile, "a") as f:
				f.write("\n" + str(itr) + "\t" + str(SumLoss) + "\t" + str(SumAcc))
Example #20
param = {
    'batch_size': 100,  # number of images per training batch
    'test_batch_size': 100,  # number of images per test batch
    'num_epochs': 1000,  # number of passes over the full training set
    # 'learning_rate': 1e-4,  # learning rate
    'learning_rate': 1e-5,  # learning rate
    'weight_decay': 5e-5,  # weight decay: an L2-style penalty on the weights that curbs overfitting
    # 'weight_decay': 0,
    'epsilon': 0.2
}

'''
Load the datasets
'''
train_dataset, test_dataset = Data_Reader.Mnist.Mnist_dataset().get_dataset()
loader_train = Data_Reader.get_dataloader(dataset=train_dataset,
                                          batch_size=param['batch_size'])
loader_test = Data_Reader.get_dataloader(dataset=test_dataset,
                                         batch_size=param['test_batch_size'])

'''
Build the model
The model is already defined in model.py and is simply loaded here
'''
modelpath = './train3_AdvT.pkl'
net = Models.Lenet5.Lenet5()  # instantiate the model
net = Models.load_state_dict(net, modelpath)
base.enable_cuda(net)  # use CUDA
num_correct, num_samples, acc = Optimizer.test(net, loader_test)  # measure the initial accuracy
print('[Start] right predict:(%d/%d) ,pre test_acc=%.4f%%' % (num_correct, num_samples, acc))

Example #21
                          dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='float32')
#模型初始化
model = cnn_model.cnn_model(image)
#loss设定
cost = fluid.layers.square_error_cost(input=model, label=label)
avg_cost = fluid.layers.mean(cost)

# 获取训练和测试程序
test_program = fluid.default_main_program().clone(for_test=True)
# 定义优化方法
optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.001)
opts = optimizer.minimize(avg_cost)

# 获取自定义数据
train_reader = paddle.batch(reader=Data_Reader.train_reader(
    train_list_path, crop_size, resize_size),
                            batch_size=16)
test_reader = paddle.batch(reader=Data_Reader.test_reader(
    test_list_path, crop_size),
                           batch_size=16)

# 定义执行器
#place = fluid.CPUPlace()  #CPU训练
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)

# 进行参数初始化
exe.run(fluid.default_startup_program())

# 定义输入数据维度
feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
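
The snippet ends before the training loop. Under the same old PaddlePaddle `fluid` API, the loop would look roughly like the following sketch; the epoch count and logging interval are illustrative, not the original values.

# Hypothetical continuation sketch, not the original code
for epoch in range(10):
    for batch_id, data in enumerate(train_reader()):
        train_cost, = exe.run(program=fluid.default_main_program(),
                              feed=feeder.feed(data),
                              fetch_list=[avg_cost])
        if batch_id % 100 == 0:
            print('Epoch %d, batch %d, cost %f' % (epoch, batch_id, train_cost[0]))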
Example #22
File: TRAIN.py  Project: mcbrs1a/TestPet
def main(argv=None):
    tf.reset_default_graph()
    keep_prob= tf.placeholder(tf.float32, name="keep_probabilty") #Dropout probability
#.........................Placeholders for input image and labels...........................................................................................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: [image index, height, width, RGB channels]
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="GTLabel")  # Ground-truth labels for training
  #.........................Build FCN Net...............................................................................................
    Net =  BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path) #Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
#......................................Get loss function for the neural network....................................................................................................
    Loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob, name="Loss")))  # Define loss function for training
   #....................................Create solver for the net............................................................................................
    trainable_var = tf.trainable_variables() # Collect all trainable variables for the net
    train_op = train(Loss, trainable_var) #Create Train Operation for the net
#----------------------------------------Create reader for data set--------------------------------------------------------------------------------------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir,  GTLabelDir=Train_Label_Dir,BatchSize=Batch_Size) #Reader for training data
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir,  GTLabelDir=Valid_Labels_Dir,BatchSize=Batch_Size) # Reader for validation data
    sess = tf.Session() #Start Tensorflow session
# -------------load trained model if exist-----------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer()) #Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path: # if train model exist restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
#--------------------------- Create files for saving loss----------------------------------------------------------------------------------------------------------

    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tloss\t Learning Rate="+str(learning_rate))
    f.close()
    if UseValidationSet:
       f = open(ValidLossTxtFile, "w")
       f.write("Iteration\tloss\t Learning Rate=" + str(learning_rate))
       f.close()
#..............Start Training loop: Main Training....................................................................
    for itr in range(MAX_ITERATION):
        print(itr)
        Images, GTLabels = TrainReader.ReadAndAugmentNextBatch()  # Load augmented images and ground-truth labels for training
        feed_dict = {image: Images,GTLabel:GTLabels, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict) # Train one cycle
# --------------Save trained model------------------------------------------------------------------------------------------------------------------------------------------
        if itr % 500 == 0 and itr>0:
            print("saving here Rhodri's Version")
            print("Saving Model to file in "+logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr) #Save model
#......................Write and display train loss..........................................................................
        if itr % 10==0:
            # Calculate train loss
            feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
            TLoss=sess.run(Loss, feed_dict=feed_dict)
            print("Step "+str(itr)+" Train Loss="+str(TLoss))
            #Write train loss to file
            with open(TrainLossTxtFile, "a") as f:
                f.write("\n"+str(itr)+"\t"+str(TLoss))
#......................Write and display Validation Set Loss by running loss on all validation images.....................................................................
        if UseValidationSet and itr % 2000 == 0:
            SumLoss=np.float64(0.0)
            NBatches = int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print("Calculating Validation on " + str(ValidReader.NumFiles) + " Images")
            for i in range(NBatches):  # Go over all validation images
                Images, GTLabels = ValidReader.ReadNextBatchClean()  # load validation images and ground-truth labels
                feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1.0}
                # Calculate loss for all label sets
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            SumLoss /= NBatches
            print("Validation Loss: "+str(SumLoss))
            with open(ValidLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(SumLoss))
from tkinter import Label

def generate(self):  # method fragment from a Tkinter table view
    data = Data_Reader.read_red()
    data.insert(0, ["Number", "F.Name", "L.Name", "Grade", "Attendance %"])
    for i in range(len(data)):
        for i2 in range(5):
            Label(self.frame, text=data[i][i2], width=10).grid(row=i, column=i2)
#Classes = ["BackGround", "Empty Vessel","Liquid","Solid"] #names of classe the net predic
#Classes=["Background","Vessel"] #Classes predicted for vessel region prediction
Classes = ["BackGround", "Empty Vessel region", "Filled Vessel region"]  #
#Classes=["BackGround","Empty Vessel region","liquid","Solid"]
#Classes=["BackGround","Vessel","Liquid","Liquid Phase two","Suspension", "Emulsion","Foam","Solid","Gel","Powder","Granular","Bulk","Bulk Liquid","Solid Phase two","Vapor"]

# .........................Build FCN Net...............................................................................................
Net = FCN.Net(NumClasses=NUM_CLASSES)  #Build Net
Net.load_state_dict(torch.load(Trained_model_path))  # Load trained model
print("Model weights loaded from: " + Trained_model_path)
Net.eval()
Net.half()
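# Half precision: float16 weights roughly halve memory use and can speed up GPU inference; input tensors must be cast to float16 to match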
# -------------------------Data reader for validation image-----------------------------------------------------------------------------------------------------------------------------

ValidReader = Data_Reader.Data_Reader(
    Image_Dir, GTLabelDir=Label_Dir, BatchSize=1
)  # build reader that will be used to load images and labels from validation set

#--------------------Sum of intersection from all validation images for all classes and sum of union for all images and all classes----------------------------------------------------------------------------------
Union = np.float64(np.zeros(len(Classes)))  #Sum of union
Intersection = np.float64(np.zeros(len(Classes)))  #Sum of Intersection
fim = 0
print("Start Evaluating intersection over union for " +
      str(ValidReader.NumFiles) + " images")
#===========================Go over all validation images and calculate IoU============================================================
while (ValidReader.itr < ValidReader.NumFiles):
    print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
    fim += 1

    #.........................................Run prediction/inference on validation................................................................................
    Images, GTLabels = ValidReader.ReadNextBatchClean()  # load validation images and ground-truth labels
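
The example is cut off at this point. For reference, a self-contained sketch of how the Intersection and Union sums above typically become per-class IoU; the function and argument names are illustrative, not the original script's.

import numpy as np

def accumulate_iou(pred, gt, intersection, union):
    # pred, gt: integer label maps of the same shape
    # intersection, union: running per-class sums, updated in place
    for c in range(len(intersection)):
        intersection[c] += np.sum((pred == c) & (gt == c))
        union[c] += np.sum((pred == c) | (gt == c))

# After all images, per-class IoU = intersection / np.maximum(union, 1)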
# and set Label_Dir and Image_Dir to the main dataset dir and the image dir, respectively
# The overlay labels should appear in OutDir
#--------------------------------------------------------------------------------------------------------------------
Label_Dir="Materials_In_Vessels/"# Main folder of the dataset
Image_Dir="Materials_In_Vessels/Train_Images/"#Images of the dataset
OutDir="Output/" # Library where the output images will be display
#-------------------------------------------------------------------------------------------------------------
import numpy as np
import scipy.misc as misc
import os
import Data_Reader
import OverrlayLabelOnImage as Overlay
#-------------------------------------------------------------------------------------------------------------------
NUM_CLASSES = 15+2+3+4 # Total number of classes in the dataset
################################################################################################################################################################################
Reader = Data_Reader.Data_Reader(Image_Dir, Label_Dir, 1) #initiate data set reader
if not os.path.exists(OutDir): os.makedirs(OutDir)
#---------------------------------------------------------------------------------------------------------------
print("Saving output to:" + OutDir)
fim = 0
#---------------------------------------------------------------------------------------------------------------
print("Start Predicting " + str(Reader.NumFiles) + " images")
while (Reader.itr < Reader.NumFiles):
        print(str(fim * 100.0 / Reader.NumFiles) + "%")
        fim += 1
# ........................................Read image.................................................................................
        FileName=Reader.OrderedFiles[Reader.itr]
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsAllPhases = Reader.ReadNextBatchClean()  # Read images and ground truth annotation
# ............................Overlay label on image according to several set of labels and save in OutDir...............................................................
        w = 1
        OverlayImg = np.concatenate((
            Images[0],
            Overlay.OverLayLiquidSolid(Images[0], LabelsVessel[0, :, :, 0], w),
            Overlay.OverLayFillLevel(Images[0], LabelsOnePhase[0, :, :, 0], w),
            Overlay.OverLayLiquidSolid(Images[0], LabelsSolidLiquid[0, :, :, 0], w),
            Overlay.OverLayExactPhase(Images[0], LabelsAllPhases[0, :, :, 0], w)), axis=1)