def main(argv=None):
    # .........................Placeholders for input image and labels.........................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net--------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image)  # Build net and load initial weights (weights before training)
    # -------------------------Start session----------------------------------------------------
    sess = tf.Session()  # Start TensorFlow session
    sess.run(tf.global_variables_initializer())
    #---------------------------------------Get activation maps of all layers of the net--------
    #-----------Create threads; each thread displays one feature activation map in one color (RGB)-----------
    NumThreads = 17
    Pos = np.zeros(NumThreads, dtype=np.float32)   # Position of thread (the intensity at which its activation map is displayed)
    Rate = np.zeros(NumThreads, dtype=np.float32)  # Rate of change in the thread intensity
    Mx = np.zeros(NumThreads, dtype=np.float32)    # Normalizing factor for the activation
    AcMap = np.zeros([NumThreads, Sy, Sx, 1], dtype=np.float32)  # Activation map used by each thread
    TColor = np.zeros([NumThreads, 3], dtype=np.float32)         # Color of thread
    #-----------------------------Create animation----------------------------------------------
    ImNum = 0
    for itr in range(NumFrame):
        if itr % (24 * 20) == 0:  # Every 20 seconds at 24 fps, switch to the next input image
            if ImNum >= len(ImagesNames):
                break
            Image = misc.imread(ImagesNames[ImNum])
            Image = misc.imresize(Image[:, :, 0:3], [Sy, Sx])
            Lr, ConIm = GetAllActivationMaps(Net, image, Image, sess)
            ImNum += 1
        DispImg = np.zeros((Sy, Sx, 3), dtype=np.float32)  # Image to be displayed
        print(itr)
        for i in range(NumThreads):
            # If a thread reaches max intensity, start decreasing the intensity of its feature map
            if Pos[i] >= 255:
                Pos[i] = 255
                Rate[i] = -np.abs(Rate[i])
            if Pos[i] <= 0:  # If a thread reaches zero intensity, replace the feature map it displays
                Pos[i] = 0
                Rate[i] = np.random.rand() * 7 + 0.2
                Ly = np.random.randint(1, len(Lr) - 1)  # Randomly pick a layer
                AcMap[i] = np.expand_dims(ConIm[:, :, np.random.randint(Lr[Ly - 1], Lr[Ly] + 1)], axis=2)  # Pick activation map
                Mx[i] = 1.0 / AcMap[i].max()
                TColor[i] = [np.random.rand(), np.random.rand(), np.random.rand()]
            # Create the frame from the combination of the activation maps used by all threads
            DispImg += (np.concatenate((TColor[i, 0] * AcMap[i], TColor[i, 1] * AcMap[i], TColor[i, 2] * AcMap[i]), axis=2) * Mx[i] * Pos[i])
            Pos[i] += Rate[i]
        DispImg /= 1.5
        DispImg[DispImg > 255] = 255
        VidOut.write(np.uint8(DispImg * 1 + Image * 0))  # Add frame to video
    VidOut.release()  # Close video
    print("Done")
def main(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    # .........................Placeholders for input image and labels...........................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # .........................Build FCN Net......................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
    # -------------------------Data reader for validation images----------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, GTLabelDir=Label_Dir, BatchSize=Batch_Size)  # Build reader that loads images and labels from the validation set
    #..............................................................................................
    sess = tf.Session()  # Start TensorFlow session
    #--------Load trained model--------------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See TRAIN.py for training")
        sys.exit()
    #------Sums of intersection and union over all validation images for all classes---------------
    Union = np.float64(np.zeros(len(Classes)))         # Sum of union
    Intersection = np.float64(np.zeros(len(Classes)))  # Sum of intersection
    fim = 0
    print("Start evaluating intersection over union for " + str(ValidReader.NumFiles) + " images")
    #===========================Go over all validation images and calculate IoU====================
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1
        #...................Run prediction/inference on validation.................................
        Images, GTLabels = ValidReader.ReadNextBatchClean()  # Read images and ground truth annotation
        # Predict annotation using net
        PredictedLabels = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        #...................Calculate intersection and union for the prediction....................
        CIOU, CU = IOU.GetIOU(PredictedLabels, GTLabels.squeeze(), len(Classes), Classes)  # Calculate intersection over union
        Intersection += CIOU * CU
        Union += CU
    #-----------------------------------------Print results-----------------------------------------
    print("---------------------------Mean Prediction----------------------------------------")
    print("---------------------IOU=Intersection Over Union----------------------------------")
    for i in range(len(Classes)):
        if Union[i] > 0:
            print(Classes[i] + "\t" + str(Intersection[i] / Union[i]))
def main(argv=None):
    # .........................Placeholders for input image and labels...........................
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net----------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images--------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)
    #-------------------------Load trained model; if you don't have a trained model see: Train.py--
    sess = tf.Session()  # Start TensorFlow session
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        sys.exit()
    #-------Create output directories for predicted labels, one folder for each granularity of label prediction-------
    if not os.path.exists(Pred_Dir):
        os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    print("Running Predictions:")
    print("Saving output to: " + Pred_Dir)
    #----------------------Go over all images and predict semantic segmentation--------------------
    fim = 0
    print("Start predicting " + str(ValidReader.NumFiles) + " images")
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1
        # ..................................Load image.............................................
        FileName = ValidReader.OrderedFiles[ValidReader.itr]  # Get input image name
        Images = ValidReader.ReadNextBatchClean()  # Load testing image
        # Predict annotation using net
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        #------------------------Save predicted labels overlaid on images--------------------------
        misc.imsave(Pred_Dir + "/OverLay/" + FileName + NameEnd, Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))  # Overlay label on image
        misc.imsave(Pred_Dir + "/Label/" + FileName[:-4] + ".png" + NameEnd, LabelPred[0].astype(np.uint8))
def predict(img):
    tf.reset_default_graph()
    logs_dir = "/Users/anekisei/Documents/vertical_crop/logs/"  # Path to logs directory where the trained model and information are stored
    Image_Dir = "/Users/anekisei/Documents/vertical_crop/train_images/"  # Test image folder
    model_path = "/Users/anekisei/Documents/vertical_crop/Model_Zoo/vgg16.npy"  # Path to pretrained vgg16 model for the encoder
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net----------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path, is_training=False)  # Create class instance for the net
    logits = Net.build(image)
    sess = tf.Session()  # Start TensorFlow session
    sess.run(tf.global_variables_initializer())
    #print("Setting up Saver...")
    saver = tf.train.Saver()
    saver.restore(sess, "/Users/anekisei/Documents/vertical_crop/logs/model.ckpt-500")
    print("restore 500")
    '''
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        sys.exit()
    '''
    feed_dict = {image: img}
    logits = sess.run(logits, feed_dict=feed_dict)
    logits = softmax(logits)
    '''
    results = []
    for i in range(logits.shape[0]):
        if logits[i, 1] <= 0.8:
            print("healthy")
            results.append(True)
        else:
            print("disease")
            results.append(False)
    sess.close()
    return results
    '''
    return logits
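# The softmax() helper called in predict() above is not defined in this snippet. A minimal,
# numerically stable NumPy version (an assumed stand-in, not necessarily the original helper)
# that normalizes each row of a [batch, classes] logits array could be:
import numpy as np

def softmax(logits):
    # Subtract the per-row maximum for numerical stability, then normalize row-wise.
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)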
def main(argv=None):
    tf.reset_default_graph()
    #.........................Placeholders for input image and labels.............................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    GTLabel = tf.placeholder(tf.int32, shape=[None, 3], name="GTLabel")  # Ground truth labels for training
    #.........................Build FCN Net........................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    feature = Net.build(image)  # Create the net and load initial weights
    #..................Get loss function for the classifier on top of the extracted features.......
    res = tf.placeholder(tf.float32, shape=[None, 3, 4, 512], name="input_image")
    c = C.Classifier(res)
    Loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=GTLabel, logits=c.classify(), name="Loss"))  # Define loss function for training
    #....................................Create solver for the net.................................
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(Loss)
    #-----------------------------Create reader for the data set-----------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir)  # Reader for training data
    sess = tf.Session()  # Start TensorFlow session
    # -------------Load trained model if it exists--------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    #--------------------------- Create files for saving loss--------------------------------------
    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tloss\t Learning Rate=" + str(learning_rate))
    f.close()
    #..............Start training loop: main training..............................................
    for itr in range(MAX_ITERATION):
        print("itr:", itr)
        Images, GTLabels = TrainReader.getBatch()  # Load augmented images and ground truth labels for training
        feed_dict = {image: Images}
        output = sess.run(feature, feed_dict=feed_dict)
        feed_dict = {res: output, GTLabel: GTLabels}
        _, loss = sess.run([optimizer, Loss], feed_dict=feed_dict)  # Train one cycle
        print("loss is,", loss)
        # --------------Save trained model-----------------------------------------------------------
        if itr % 230 == 0 and itr > 0:
            print("Saving Model to file in " + logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr)  # Save model
        #......................Write and display train loss..........................................
def predict(imagebatch):
    tf.reset_default_graph()
    logs_dir = "/Users/anekisei/Documents/Spine_project_horizontal/classifier/logs/"  # Path to logs directory where the trained model and information are stored
    Image_Dir = "/Users/anekisei/Documents/Spine_project_vertical/test_images/"  # Test image folder
    model_path = "/Users/anekisei/Documents/Spine_project_vertical/FCN_segment/Model_Zoo/vgg16.npy"  # Path to pretrained vgg16 model for the encoder
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net----------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    feature = Net.build(image)
    res = tf.placeholder(tf.float32, shape=[None, 3, 4, 512], name="input_image")
    c = C.Classifier(res)
    logits = c.classify()
    sess = tf.Session()  # Start TensorFlow session
    sess.run(tf.global_variables_initializer())
    #print("Setting up Saver...")
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        print("Restore model from:", ckpt.model_checkpoint_path)
        saver.restore(sess, ckpt.model_checkpoint_path)
        #print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        sys.exit()
    feed_dict = {image: imagebatch}
    output = sess.run(feature, feed_dict=feed_dict)
    feed_dict = {res: output}
    logits = sess.run(logits, feed_dict=feed_dict)  # Run the classifier on the extracted features
    predicts = np.argmax(logits, axis=1)
    return predicts
def main(argv=None):
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability for training
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    VesselLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="VesselLabel")  # Label image for vessel/background prediction; used as ROI input mask for the net
    #-------------------------Build Net--------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, VesselLabel, NUM_CLASSES, keep_prob)  # Build net and load initial weights from vgg16 (weights before training)
    # -------------------------Data reader for validation images--------------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, Label_Dir, Batch_Size)  # Build reader that loads images and labels from the validation set
    sess = tf.Session()  # Start TensorFlow session
    #--------Load trained model-----------------------------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See TRAIN.py for training")
        print("or download the pretrained model from"
              " https://drive.google.com/file/d/0B6njwynsu2hXRDMxWlBUTWFZM2c/view?usp=sharing "
              "and extract it in logs_dir")
        sys.exit()
    #--------Sums of intersection and union over all validation images for all classes-----------------
    VesUn = np.float64(np.zeros(len(VesseClasses)))            # Sum of union for vessel region prediction
    VesIn = np.float64(np.zeros(len(VesseClasses)))            # Sum of intersection for vessel region prediction
    PhaseUn = np.float64(np.zeros(len(PhaseClasses)))          # Sum of union for fill-level prediction
    PhaseIn = np.float64(np.zeros(len(PhaseClasses)))          # Sum of intersection for fill-level prediction
    LiqSolUn = np.float64(np.zeros(len(LiquidSolidClasses)))   # Sum of union for liquid/solid prediction
    LiqSolIn = np.float64(np.zeros(len(LiquidSolidClasses)))   # Sum of intersection for liquid/solid prediction
    ExactPhaseUn = np.float64(np.zeros(len(ExactPhaseClasses)))
    ExactPhaseIn = np.float64(np.zeros(len(ExactPhaseClasses)))
    fim = 0
    print("Start evaluating intersection over union for " + str(ValidReader.NumFiles) + " images")
    #===========================Go over all validation images and calculate IoU========================
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1
        #.....................Run prediction/inference on validation....................................
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsExactPhase = ValidReader.ReadNextBatchClean()  # Read images and ground truth annotation
        # Predict annotation using net
        ExactPhase, LiquidSolid, OnePhase, Vessel = sess.run(
            [Net.ExactPhasePred, Net.LiquidSolidPred, Net.PhasePred, Net.VesselPred],
            feed_dict={image: Images, keep_prob: 1.0, VesselLabel: LabelsVessel})
        #.....................Calculate intersection and union for the prediction.......................
        # Vessel IoU
        CIOU, CU = IOU.GetIOU(Vessel, LabelsVessel.squeeze(), len(VesseClasses), VesseClasses)
        VesIn += CIOU * CU
        VesUn += CU
        # One-phase (fill level) IoU
        CIOU, CU = IOU.GetIOU(OnePhase, LabelsOnePhase.squeeze(), len(PhaseClasses), PhaseClasses)
        PhaseIn += CIOU * CU
        PhaseUn += CU
        # Liquid/solid IoU
        CIOU, CU = IOU.GetIOU(LiquidSolid, LabelsSolidLiquid.squeeze(), len(LiquidSolidClasses), LiquidSolidClasses)
        LiqSolIn += CIOU * CU
        LiqSolUn += CU
        # All-phases (exact phase) IoU
        CIOU, CU = IOU.GetIOU(ExactPhase, LabelsExactPhase.squeeze(), len(ExactPhaseClasses), ExactPhaseClasses)
        ExactPhaseIn += CIOU * CU
        ExactPhaseUn += CU
    #-----------------------------------------Print results--------------------------------------------
    print("----------------------------------------------------------------------------------")
    print("---------------------------Mean Prediction----------------------------------------")
    print("---------------------IOU=Intersection Over Union----------------------------------")
    print("-------------------------Vessel IOU----------------------------------------")
    for i in range(len(VesseClasses)):
        if VesUn[i] > 0:
            print(VesseClasses[i] + "\t" + str(VesIn[i] / VesUn[i]))
    print("------------------------One Phase IOU----------------------------------------")
    for i in range(len(PhaseClasses)):
        if PhaseUn[i] > 0:
            print(PhaseClasses[i] + "\t" + str(PhaseIn[i] / PhaseUn[i]))
    print("--------------------------Liquid Solid IOU-----------------------------------")
    for i in range(len(LiquidSolidClasses)):
        if LiqSolUn[i] > 0:
            print(LiquidSolidClasses[i] + "\t" + str(LiqSolIn[i] / LiqSolUn[i]))
    print("----------------------All Phases IOU----------------------------------------")
    for i in range(len(ExactPhaseClasses)):
        if ExactPhaseUn[i] > 0:
            print(ExactPhaseClasses[i] + "\t" + str(ExactPhaseIn[i] / ExactPhaseUn[i]))
def main(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    #.........................Placeholders for input image and labels...............................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="GTLabel")  # Ground truth labels for training
    #.........................Build FCN Net..........................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
    #..................Get loss function for the network (one loss function per label set)...........
    Loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob, name="Loss")))  # Define loss function for training
    #....................................Create solver for the net...................................
    trainable_var = tf.trainable_variables()  # Collect all trainable variables for the net
    train_op = train(Loss, trainable_var)  # Create train operation for the net
    #-----------------------------Create readers for the data sets-----------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, GTLabelDir=Train_Label_Dir, BatchSize=Batch_Size)  # Reader for training data
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, GTLabelDir=Valid_Labels_Dir, BatchSize=Batch_Size)  # Reader for validation data
    sess = tf.Session()  # Start TensorFlow session
    # -------------Load trained model if it exists----------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    #--------------------------- Create files for saving loss----------------------------------------
    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tloss\t Learning Rate=" + str(learning_rate))
    f.close()
    if UseValidationSet:
        f = open(ValidLossTxtFile, "w")
        f.write("Iteration\tloss\t Learning Rate=" + str(learning_rate))
        f.close()
    #..............Start training loop: main training................................................
    for itr in range(MAX_ITERATION):
        print(itr)
        Images, GTLabels = TrainReader.ReadAndAugmentNextBatch()  # Load augmented images and ground truth labels for training
        feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)  # Train one cycle
        # --------------Save trained model------------------------------------------------------------
        if itr % 500 == 0 and itr > 0:
            print("saving here Rhodri's Version")
            print("Saving Model to file in " + logs_dir)
            saver.save(sess, logs_dir + "model.ckpt", itr)  # Save model
        #......................Write and display train loss..........................................
        if itr % 10 == 0:
            # Calculate train loss
            feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print("Step " + str(itr) + " Train Loss=" + str(TLoss))
            # Write train loss to file
            with open(TrainLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(TLoss))
        #.........Write and display validation-set loss by running the loss on all validation images....
        if UseValidationSet and itr % 2000 == 0:
            SumLoss = np.float64(0.0)
            NBatches = np.int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print("Calculating Validation on " + str(ValidReader.NumFiles) + " Images")
            for i in range(NBatches):  # Go over all validation images
                Images, GTLabels = ValidReader.ReadNextBatchClean()  # Load validation images and ground truth labels
                feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1.0}
                # Calculate loss for all label sets
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
            SumLoss /= NBatches
            print("Validation Loss: " + str(SumLoss))
            with open(ValidLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(SumLoss))
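# The train() helper used above (train_op = train(Loss, trainable_var)) is defined elsewhere in
# the repository and is not shown here. A typical TF1-style version, given only as an assumed
# sketch, builds an Adam optimizer and applies the gradients of the loss to the trainable
# variables; learning_rate is the same module-level constant used above:
import tensorflow as tf

def train_sketch(loss_val, var_list):
    optimizer = tf.train.AdamOptimizer(learning_rate)
    grads = optimizer.compute_gradients(loss_val, var_list=var_list)  # Gradients of the loss w.r.t. the net weights
    return optimizer.apply_gradients(grads)                           # Op that performs one optimization step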
def main():
    # Placeholders for input image and labels
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # Build the neural network
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # Data reader for validation/testing images
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)
    # Start TensorFlow session
    sess = tf.Session()
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    # Load model from checkpoint
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        sys.exit()
    # Create output directories for predicted labels, one folder for each granularity of label prediction
    if not os.path.exists(Pred_Dir):
        os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    print("Running Predictions:")
    print("Saving output to: " + Pred_Dir)
    # Iterate through images and predict semantic segmentation for the test set
    print("Start predicting " + str(ValidReader.NumFiles) + " images")
    fim = 0
    while (ValidReader.itr < ValidReader.NumFiles):
        # Load image
        FileName = ValidReader.OrderedFiles[ValidReader.itr]  # Get input image name
        Images = ValidReader.ReadNextBatchClean()  # Load testing image
        # Predict annotation using the neural net
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        # Save predicted labels overlaid on images
        misc.imsave(Pred_Dir + "/OverLay/" + FileName, Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))  # Overlay label on image
        misc.imsave(Pred_Dir + "/Label/" + FileName[:-4] + ".png", LabelPred[0].astype(np.uint8))
        fim += 1
        print("{:2.2f}%".format(fim * 100.0 / ValidReader.NumFiles))
    # Iterate through images and predict semantic segmentation for the validation set
    if not os.path.exists(Valid_Pred_Dir + "/OverLay"):
        os.makedirs(Valid_Pred_Dir + "/OverLay")
    if not os.path.exists(Valid_Pred_Dir + "/Probs"):
        os.makedirs(Valid_Pred_Dir + "/Probs")
    if not os.path.exists(Valid_Pred_Dir + "/Label"):
        os.makedirs(Valid_Pred_Dir + "/Label")
    print("Validating on " + str(ValidReader.NumFiles) + " images")
    ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, GTLabelDir=Valid_Labels_Dir, BatchSize=1)
    roc = ROC(NUM_CLASSES)
    fim = 0
    while (ValidReader.itr < ValidReader.NumFiles):
        # Load image
        FileName = ValidReader.OrderedFiles[ValidReader.itr]  # Get input image name
        Images, GTLabels = ValidReader.ReadNextBatchClean()  # Load validation image and ground truth labels
        # Predict annotation using the neural net
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        # Get probabilities
        LabelProb = sess.run(Net.Prob, feed_dict={image: Images, keep_prob: 1.0})
        sess1 = tf.InteractiveSession()
        probs = np.squeeze(tf.nn.softmax(LabelProb).eval())
        sess1.close()
        # Import data to ROC object
        roc.add_data(np.squeeze(GTLabels), probs, np.squeeze(LabelPred))
        # Save predicted labels overlaid on images
        misc.imsave(Valid_Pred_Dir + "/OverLay/" + FileName, Overlay.OverLayLabelOnImage(Images[0], LabelPred[0], w))  # Overlay label on image
        misc.imsave(Valid_Pred_Dir + "/Label/" + FileName[:-4] + ".png", LabelPred[0].astype(np.uint8))
        np.save(Valid_Pred_Dir + "/Probs/" + FileName[:-4] + ".npy", probs)
        fim += 1
        print("{:2.2f}%".format(fim * 100.0 / ValidReader.NumFiles))
    # Compute accuracy, precision, recall, and F1 score
    acc = roc.accuracy()
    print(roc.report)
    print("Total Accuracy: {:3.2f}".format(acc))
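# The ROC helper class used above is project-specific and is not included in this snippet.
# As an assumed illustration only, the same accuracy / precision / recall / F1 summary could be
# accumulated with scikit-learn over the flattened per-pixel labels; the class and method names
# below mirror the calls above but are hypothetical:
import numpy as np
from sklearn.metrics import accuracy_score, classification_report

class ROCSketch:
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.y_true, self.y_pred = [], []

    def add_data(self, gt_labels, probs, pred_labels):
        # probs is kept in the signature to mirror roc.add_data() above; only the hard
        # predictions are needed for these summary metrics.
        self.y_true.append(np.asarray(gt_labels).ravel())
        self.y_pred.append(np.asarray(pred_labels).ravel())

    def accuracy(self):
        return accuracy_score(np.concatenate(self.y_true), np.concatenate(self.y_pred))

    @property
    def report(self):
        return classification_report(np.concatenate(self.y_true), np.concatenate(self.y_pred))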
def main(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    #.........................Placeholders for input image and labels...............................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    VesselLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="VesselLabel")  # Label image for vessel/background prediction
    PhaseLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="PhaseLabel")  # Label image for vessel full/background prediction
    LiquidSolidLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="LiquidSolidLabel")  # Label image for liquid/solid/vessel/background prediction
    AllPhasesLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="AllPhasesLabel")  # Label image for fine-grained phase prediction (liquid, solid, powder, foam)
    #.........................Build FCN Net..........................................................
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
    #..................Get loss functions for the network, one loss function per label set...........
    VesselLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(VesselLabel, squeeze_dims=[3]), logits=Net.VesselProb, name="VesselLoss")))
    PhaseLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(PhaseLabel, squeeze_dims=[3]), logits=Net.PhaseProb, name="PhaseLoss")))
    LiquidSolidLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(LiquidSolidLabel, squeeze_dims=[3]), logits=Net.LiquidSolidProb, name="LiquidSolidLoss")))
    AllPhasesLoss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.squeeze(AllPhasesLabel, squeeze_dims=[3]), logits=Net.AllPhasesProb, name="AllPhasesLoss")))
    WeightDecayLoss = Net.SumWeights * Weight_Loss_Rate  # Weight decay loss
    TotalLoss = VesselLoss + PhaseLoss + LiquidSolidLoss + AllPhasesLoss + WeightDecayLoss  # Loss is the sum of the losses for all categories
    #....................................Create solver for the net...................................
    trainable_var = tf.trainable_variables()
    train_op = train(TotalLoss, trainable_var)
    #-----------------------------Create readers for the data sets-----------------------------------
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir, Label_Dir, Batch_Size)  # Reader for training data
    ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir, Label_Dir, Batch_Size)  # Reader for validation data
    sess = tf.Session()  # Start TensorFlow session
    # -------------Load trained model if it exists----------------------------------------------------
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    #---------------------------Start training: create loss files for saving loss during training-----
    f = open(TrainLossTxtFile, "w")
    f.write("Iteration\tTotal_Loss\tVessel_Loss\tOne_Phase_Loss\tLiquid_Solid_Loss\tAll_Phases_Loss\t Learning Rate=" + str(learning_rate))
    f.close()
    f = open(ValidLossTxtFile, "w")
    f.write("Iteration\tTotal_Loss\tVessel_Loss\tOne_Phase_Loss\tLiquid_Solid_Loss\tAll_Phases_Loss\t Learning Rate=" + str(learning_rate))
    f.close()
    #..............Start training loop: main training................................................
    for itr in range(MAX_ITERATION):
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsAllPhases = TrainReader.ReadAndAugmentNextBatch()  # Load augmented images and ground truth labels for training
        feed_dict = {image: Images, VesselLabel: LabelsVessel, PhaseLabel: LabelsOnePhase, LiquidSolidLabel: LabelsSolidLiquid, AllPhasesLabel: LabelsAllPhases, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)  # Train one cycle
        # --------------Save trained model------------------------------------------------------------
        if itr % 20000 == 0:
            saver.save(sess, logs_dir + "model.ckpt", itr)
        #......................Write and display train loss..........................................
        if itr % 10 == 0:
            # Calculate train loss
            Tot_Loss, Ves_Loss, Phase_Loss, LiquidSolid_Loss, AllPhase_Loss = sess.run(
                [TotalLoss, VesselLoss, PhaseLoss, LiquidSolidLoss, AllPhasesLoss], feed_dict=feed_dict)
            print("Step: %d, Total_loss:%g, Vessel_Loss:%g, OnePhases_Loss:%g, LiquidSolid_Loss:%g, AllPhases_Loss:%g" % (itr, Tot_Loss, Ves_Loss, Phase_Loss, LiquidSolid_Loss, AllPhase_Loss))
            # Write train loss to file
            with open(TrainLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(Tot_Loss) + "\t" + str(Ves_Loss) + "\t" + str(Phase_Loss) + "\t" + str(LiquidSolid_Loss) + "\t" + str(AllPhase_Loss))
        #.........Write and display validation-set loss by running the loss on all validation images....
        if itr % 500 == 0:
            SumTotalLoss = np.float64(0.0)
            SumVesselLoss = np.float64(0.0)
            SumOnePhaseLoss = np.float64(0.0)
            SumLiquidSolidLoss = np.float64(0.0)
            SumAllPhaseLoss = np.float64(0.0)
            NBatches = np.int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            for i in range(NBatches):  # Go over all validation images
                Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsAllPhases = ValidReader.ReadNextBatchClean()  # Load validation images and ground truth labels
                feed_dict = {image: Images, VesselLabel: LabelsVessel, PhaseLabel: LabelsOnePhase, LiquidSolidLabel: LabelsSolidLiquid, AllPhasesLabel: LabelsAllPhases, keep_prob: 1}
                # Calculate loss for all label sets
                Tot_Loss, Ves_Loss, Phase_Loss, LiquidSolid_Loss, AllPhase_Loss = sess.run(
                    [TotalLoss, VesselLoss, PhaseLoss, LiquidSolidLoss, AllPhasesLoss], feed_dict=feed_dict)
                SumTotalLoss += Tot_Loss
                SumVesselLoss += Ves_Loss
                SumOnePhaseLoss += Phase_Loss
                SumLiquidSolidLoss += LiquidSolid_Loss
                SumAllPhaseLoss += AllPhase_Loss
            SumTotalLoss /= NBatches
            SumVesselLoss /= NBatches
            SumOnePhaseLoss /= NBatches
            SumLiquidSolidLoss /= NBatches
            SumAllPhaseLoss /= NBatches
            print("Validation Total_loss:%g, Vessel_Loss:%g, OnePhases_Loss:%g, LiquidSolid_Loss:%g, AllPhases_Loss:%g" % (SumTotalLoss, SumVesselLoss, SumOnePhaseLoss, SumLiquidSolidLoss, SumAllPhaseLoss))
            with open(ValidLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(SumTotalLoss) + "\t" + str(SumVesselLoss) + "\t" + str(SumOnePhaseLoss) + "\t" + str(SumLiquidSolidLoss) + "\t" + str(SumAllPhaseLoss))
def main(argv=None):
    # .........................Placeholders for input image and labels...........................
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net----------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images--------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, BatchSize=1)
    #-------------------------Load trained model; if you don't have a trained model see: Train.py--
    sess = tf.Session()  # Start TensorFlow session
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        sys.exit()
    #-------Create output directories for predicted labels, one folder for each granularity of label prediction-------
    if not os.path.exists(Pred_Dir):
        os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    print("Running Predictions:")
    print("Saving output to: " + path_XML)
    #----------------------Go over all images and predict semantic segmentation--------------------
    fim = 0
    print("Start predicting " + str(ValidReader.NumFiles) + " images")
    startTime = time.time()
    while (ValidReader.itr < ValidReader.NumFiles):
        # ..................................Load image.............................................
        FileName = ValidReader.OrderedFiles[ValidReader.itr]  # Get input image name
        Images = ValidReader.ReadNextBatchClean()  # Load testing image
        # Predict annotation using net
        LabelPred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
        #------------------------Resize prediction back to the original image size and save as XML----
        endTimePredict = time.time()
        print('\n\nTime to predict image', FileName, ':', endTimePredict - startTime)
        ImageResult = Images[0].copy()
        LabelResult = LabelPred[0].copy()
        LabelResult = LabelResult.astype(np.uint8)
        imgORG = cv2.imread(Image_Read + FileName)
        height_ORG, width_ORG, _ = imgORG.shape
        print('imgORG shape', imgORG.shape)
        print('ListSize:', ListSize[fim][1], ' ', ListSize[fim][0])
        ImageResult = cv2.resize(ImageResult, (width_ORG, height_ORG))
        LabelResult = cv2.resize(LabelResult, (width_ORG, height_ORG))
        CreateXML.SaveXML(LabelResult, FileName, path_XML)
        print('Time to create XML for image', FileName, ':', time.time() - endTimePredict)
        startTime = time.time()
        fim += 1
        print('Processing:', str(fim * 100.0 / ValidReader.NumFiles) + "%")
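# Note on the label resize in the loop above: cv2.resize defaults to bilinear interpolation,
# which blends neighbouring class indices when applied to a class-ID map such as LabelResult.
# If exact label values must be preserved, nearest-neighbour interpolation is the usual choice;
# a suggested alternative call (not part of the original script) would be:
#     LabelResult = cv2.resize(LabelResult, (width_ORG, height_ORG), interpolation=cv2.INTER_NEAREST)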
def main(argv=None):
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    VesselLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="VesselLabel")  # Label image for vessel/background prediction
    # -------------------------Build Net--------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image, VesselLabel, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    # -------------------------Data reader for validation/testing images-------------------------------
    ValidReader = Data_Reader.Data_Reader(Image_Dir, Label_Dir, 1)
    #-------------------------Load trained model; if you don't have a trained model see: Train.py-------
    sess = tf.Session()  # Start TensorFlow session
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        print("or download the pretrained model from "
              "https://drive.google.com/file/d/0B6njwynsu2hXRFpmY1pOV1A4SFE/view?usp=sharing "
              "and extract it in logs_dir")
        sys.exit()
    #-------Create output directories for predicted labels, one folder for each granularity of label prediction-------
    if not os.path.exists(Pred_Dir):
        os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/OverLay/Vessel/"):
        os.makedirs(Pred_Dir + "/OverLay/Vessel/")
    if not os.path.exists(Pred_Dir + "/OverLay/OnePhase/"):
        os.makedirs(Pred_Dir + "/OverLay/OnePhase/")
    if not os.path.exists(Pred_Dir + "/OverLay/LiquiSolid/"):
        os.makedirs(Pred_Dir + "/OverLay/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/OverLay/ExactPhase/"):
        os.makedirs(Pred_Dir + "/OverLay/ExactPhase/")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    if not os.path.exists(Pred_Dir + "/Label/Vessel/"):
        os.makedirs(Pred_Dir + "/Label/Vessel/")
    if not os.path.exists(Pred_Dir + "/Label/OnePhase/"):
        os.makedirs(Pred_Dir + "/Label/OnePhase/")
    if not os.path.exists(Pred_Dir + "/Label/LiquiSolid/"):
        os.makedirs(Pred_Dir + "/Label/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/Label/ExactPhase/"):
        os.makedirs(Pred_Dir + "/Label/ExactPhase/")
    if not os.path.exists(Pred_Dir + "/AllPredicitionsDisplayed/"):
        os.makedirs(Pred_Dir + "/AllPredicitionsDisplayed/")
    print("Running Predictions:")
    print("Saving output to: " + Pred_Dir)
    #----------------------Go over all images and predict semantic segmentation at various granularities----
    fim = 0
    print("Start predicting " + str(ValidReader.NumFiles) + " images")
    while (ValidReader.itr < ValidReader.NumFiles):
        print(str(fim * 100.0 / ValidReader.NumFiles) + "%")
        fim += 1
        # ..................................Load image..............................................
        FileName = ValidReader.OrderedFiles[ValidReader.itr]
        Images, LabelsVessel, LabelsOnePhase, LabelsSolidLiquid, LabelsExactPhase = ValidReader.ReadNextBatchClean()  # Read images and ground truth annotation
        # Predict annotation using net
        ExactPhase, LiquidSolid, OnePhase, Vessel = sess.run(
            [Net.ExactPhasePred, Net.LiquidSolidPred, Net.PhasePred, Net.VesselPred],
            feed_dict={image: Images, keep_prob: 1.0, VesselLabel: LabelsVessel})
        #------------------------Save predicted labels overlaid on images---------------------------
        misc.imsave(Pred_Dir + "/OverLay/Vessel/" + FileName + NameEnd, Overlay.OverLayLiquidSolid(Images[0], Vessel[0], w))
        misc.imsave(Pred_Dir + "/Label/OnePhase/" + FileName + NameEnd, OnePhase[0])
        misc.imsave(Pred_Dir + "/OverLay/OnePhase/" + FileName + NameEnd, Overlay.OverLayFillLevel(Images[0], OnePhase[0], w))
        misc.imsave(Pred_Dir + "/Label/LiquiSolid/" + FileName + NameEnd, LiquidSolid[0])
        misc.imsave(Pred_Dir + "/OverLay/LiquiSolid/" + FileName + NameEnd, Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w))
        misc.imsave(Pred_Dir + "/Label/ExactPhase/" + FileName + NameEnd, ExactPhase[0])
        misc.imsave(Pred_Dir + "/OverLay/ExactPhase/" + FileName + NameEnd, Overlay.OverLayExactPhase(Images[0], ExactPhase[0], w))
        misc.imsave(Pred_Dir + "/AllPredicitionsDisplayed/" + FileName + NameEnd,
                    np.concatenate((Images[0],
                                    Overlay.OverLayLiquidSolid(Images[0], Vessel[0], w),
                                    Overlay.OverLayFillLevel(Images[0], OnePhase[0], w),
                                    Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w),
                                    Overlay.OverLayExactPhase(Images[0], ExactPhase[0], w)), axis=1))
def main(argv=None):
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    #-------------------------Build Net----------------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Build net and load initial weights (weights before training)
    #----------------------Create list of images for annotation prediction (all images in Image_Dir)----
    ImageFiles = []  # List of images in Image_Dir for label prediction
    ImageFiles += [each for each in os.listdir(Image_Dir)
                   if each.endswith(('.PNG', '.JPG', '.TIF', '.GIF', '.png', '.jpg', '.tif', '.gif'))]  # Get list of images to predict
    print('Number of images=' + str(len(ImageFiles)))
    #-------------------------Load trained model; if you don't have a trained model see: Train.py-------
    sess = tf.Session()  # Start TensorFlow session
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # If a trained model exists, restore it
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    else:
        print("ERROR: NO TRAINED MODEL IN: " + logs_dir + " See Train.py for creating a trained network")
        print("or download from: "
              "https://drive.google.com/file/d/0B6njwynsu2hXWi1YZ3JKRmdLOWc/view?usp=sharing"
              " and extract it in logs_dir")
        sys.exit()
    #-------Create output directories for predicted labels, one folder for each granularity of label prediction-------
    if not os.path.exists(Pred_Dir):
        os.makedirs(Pred_Dir)
    if not os.path.exists(Pred_Dir + "/OverLay"):
        os.makedirs(Pred_Dir + "/OverLay")
    if not os.path.exists(Pred_Dir + "/OverLay/Vessel/"):
        os.makedirs(Pred_Dir + "/OverLay/Vessel/")
    if not os.path.exists(Pred_Dir + "/OverLay/OnePhase/"):
        os.makedirs(Pred_Dir + "/OverLay/OnePhase/")
    if not os.path.exists(Pred_Dir + "/OverLay/LiquiSolid/"):
        os.makedirs(Pred_Dir + "/OverLay/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/OverLay/AllPhases/"):
        os.makedirs(Pred_Dir + "/OverLay/AllPhases/")
    if not os.path.exists(Pred_Dir + "/Label"):
        os.makedirs(Pred_Dir + "/Label")
    if not os.path.exists(Pred_Dir + "/Label/Vessel/"):
        os.makedirs(Pred_Dir + "/Label/Vessel/")
    if not os.path.exists(Pred_Dir + "/Label/OnePhase/"):
        os.makedirs(Pred_Dir + "/Label/OnePhase/")
    if not os.path.exists(Pred_Dir + "/Label/LiquiSolid/"):
        os.makedirs(Pred_Dir + "/Label/LiquiSolid/")
    if not os.path.exists(Pred_Dir + "/Label/AllPhases/"):
        os.makedirs(Pred_Dir + "/Label/AllPhases/")
    if not os.path.exists(Pred_Dir + "/AllPredicitionsDisplayed/"):
        os.makedirs(Pred_Dir + "/AllPredicitionsDisplayed/")
    print("Running Predictions:")
    print("Saving output to: " + Pred_Dir)
    #----------------------Go over all images and predict semantic segmentation at various granularities----
    for fim in range(len(ImageFiles)):
        FileName = ImageFiles[fim]
        print(str(fim) + ") " + Image_Dir + FileName)
        Images = LoadImage(Image_Dir + FileName)  # Load next image
        # Run net for label prediction
        AllPhases, LiquidSolid, OnePhase, Vessel = sess.run(
            [Net.AllPhasesPred, Net.LiquidSolidPred, Net.PhasePred, Net.VesselPred],
            feed_dict={image: Images, keep_prob: 1.0})
        #-------Save predicted labels in the /Label/ folders and the labels overlaid on the image in the /OverLay/ folders-------
        misc.imsave(Pred_Dir + "/Label/Vessel/" + FileName + NameEnd, Vessel[0])
        misc.imsave(Pred_Dir + "/OverLay/Vessel/" + FileName + NameEnd, Overlay.OverLayLiquidSolid(Images[0], Vessel[0], w))
        misc.imsave(Pred_Dir + "/Label/OnePhase/" + FileName + NameEnd, OnePhase[0])
        misc.imsave(Pred_Dir + "/OverLay/OnePhase/" + FileName + NameEnd, Overlay.OverLayFillLevel(Images[0], OnePhase[0], w))
        misc.imsave(Pred_Dir + "/Label/LiquiSolid/" + FileName + NameEnd, LiquidSolid[0])
        misc.imsave(Pred_Dir + "/OverLay/LiquiSolid/" + FileName + NameEnd, Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w))
        misc.imsave(Pred_Dir + "/Label/AllPhases/" + FileName + NameEnd, AllPhases[0])
        misc.imsave(Pred_Dir + "/OverLay/AllPhases/" + FileName + NameEnd, Overlay.OverLayExactPhase(Images[0], AllPhases[0], w))
        misc.imsave(Pred_Dir + "/AllPredicitionsDisplayed/" + FileName + NameEnd,
                    np.concatenate((Images[0],
                                    Overlay.OverLayLiquidSolid(Images[0], Vessel[0], w),
                                    Overlay.OverLayFillLevel(Images[0], OnePhase[0], w),
                                    Overlay.OverLayLiquidSolid(Images[0], LiquidSolid[0], w),
                                    Overlay.OverLayExactPhase(Images[0], AllPhases[0], w)), axis=1))
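# The LoadImage() helper used above is defined elsewhere in the script and is not shown here.
# As an assumed sketch only, it would read the image, keep the RGB channels, optionally shrink
# very large inputs, and add a batch dimension so the result fits the [None, None, None, 3]
# input placeholder; max_size and the resize behavior are illustrative guesses:
import numpy as np
from scipy import misc

def LoadImage_sketch(path, max_size=1000.0):
    Img = misc.imread(path)
    Img = Img[:, :, 0:3]  # Drop an alpha channel if present
    h, width = Img.shape[0], Img.shape[1]
    ratio = np.min([max_size / h, max_size / width, 1.0])  # Shrink only if the image is larger than max_size
    if ratio < 1.0:
        Img = misc.imresize(Img, (int(h * ratio), int(width * ratio)))
    return np.expand_dims(Img, axis=0)  # Add batch dimension: shape [1, H, W, 3]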
def main(argv=None):
    tf.reset_default_graph()
    keep_prob = tf.placeholder(tf.float32, name="keep_probabilty")  # Dropout probability
    # Placeholders for input image and labels
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")
    GTLabel = tf.placeholder(tf.int32, shape=[None, None, None, 1], name="GTLabel")
    # Build FCN network
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class for the network
    Net.build(image, NUM_CLASSES, keep_prob)  # Create the net and load initial weights
    # Get loss function for the network (one loss function per label set)
    Loss = -tf.reduce_sum(tf.multiply(tf.to_float(tf.one_hot(tf.squeeze(GTLabel, squeeze_dims=[3]), NUM_CLASSES)),
                                      tf.log(tf.nn.softmax(Net.Prob) + 1e-12)))
    # Create solver for the net
    trainable_var = tf.trainable_variables()  # Collect all trainable variables for the net
    train_op = train(Loss, trainable_var)  # Create the train operation for the net
    # Create reader for training data
    TrainReader = Data_Reader.Data_Reader(Train_Image_Dir,
                                          GTLabelDir=Train_Label_Dir, BatchSize=Batch_Size)
    # Create reader for validation data
    if UseValidationSet:
        ValidReader = Data_Reader.Data_Reader(Valid_Image_Dir,
                                              GTLabelDir=Valid_Labels_Dir, BatchSize=Validation_Batch_Size)
    # Start TensorFlow session
    sess = tf.Session()
    print("Setting up Saver...")
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # Initialize variables
    ckpt = tf.train.get_checkpoint_state(logs_dir)
    if ckpt and ckpt.model_checkpoint_path:  # Restore trained model, if it exists
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored...")
    # Create files for logging progress
    f = open(TrainLossTxtFile, "w")
    f.write("Training Loss\n")
    f.write("Learning_rate\t" + str(learning_rate) + "\n")
    f.write("Batch_size\t" + str(Batch_Size) + "\n")
    f.write("Itr\tLoss")
    f.close()
    if UseValidationSet:
        f = open(ValidRecTxtFile, "w")
        f.write("Validation Record\n")
        f.write("Learning_rate\t" + str(learning_rate) + "\n")
        f.write("Batch_size\t" + str(Batch_Size) + "\n")
        f.write("Itr\tLoss\tAccuracy")
        f.close()
    # Start training loop: main training
    for itr in range(MAX_ITERATION):
        if UseStochasticity:
            Images, GTLabels = TrainReader.ReadAndAugmentNextBatch()  # Load augmented images and labels
        else:
            Images, GTLabels = TrainReader.ReadNextBatchClean()
        feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 0.5}
        sess.run(train_op, feed_dict=feed_dict)  # Train one cycle
        # Save trained model
        if itr % 500 == 0 and itr > 0:
            print("Saving Model to file in " + logs_dir)
            saver.save(sess, os.path.join(logs_dir, "model.ckpt"), itr)  # Save model
        # Write and display train loss
        if itr % 1 == 0:
            # Calculate train loss
            feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1}
            TLoss = sess.run(Loss, feed_dict=feed_dict)
            print("Step " + str(itr) + " Train Loss=" + str(TLoss))
            # Write train loss to file
            with open(TrainLossTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(TLoss))
        # Write and display validation-set loss
        if UseValidationSet and itr % 25 == 0:
            SumAcc = np.float64(0.0)
            SumLoss = np.float64(0.0)
            NBatches = np.int(np.ceil(ValidReader.NumFiles / ValidReader.BatchSize))
            print("Calculating Validation on " + str(ValidReader.NumFiles) + " Images")
            for i in range(NBatches):
                Images, GTLabels = ValidReader.ReadNextBatchClean()  # Load validation data
                feed_dict = {image: Images, GTLabel: GTLabels, keep_prob: 1.0}
                # Calculate loss for all label sets
                TLoss = sess.run(Loss, feed_dict=feed_dict)
                SumLoss += TLoss
                # Compute validation accuracy
                pred = sess.run(Net.Pred, feed_dict={image: Images, keep_prob: 1.0})
                acc = accuracy_score(np.squeeze(GTLabels).ravel(), np.squeeze(pred).ravel())
                SumAcc += acc
            # Print validation status to console
            print("Epoch: " + str(TrainReader.Epoch))
            SumAcc /= NBatches
            SumLoss /= NBatches
            print("Validation Loss: " + str(SumLoss))
            print("Validation Accuracy: " + str(SumAcc))
            with open(ValidRecTxtFile, "a") as f:
                f.write("\n" + str(itr) + "\t" + str(SumLoss) + "\t" + str(SumAcc))
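# Note on the loss above: up to the sum-vs-mean reduction and the 1e-12 epsilon, the hand-written
# one-hot cross-entropy is equivalent to TensorFlow's built-in sparse softmax cross-entropy,
# assuming Net.Prob holds raw per-pixel logits (as the tf.nn.softmax(Net.Prob) call suggests).
# Inside main(), the same loss could be written as (suggested alternative, not in the original):
#     Loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
#         labels=tf.squeeze(GTLabel, squeeze_dims=[3]), logits=Net.Prob))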
def main(argv=None):
    # .........................Placeholders for input image and labels...........................
    image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name="input_image")  # Input image batch: first dimension image number, second width, third height, fourth RGB
    # -------------------------Build Net----------------------------------------------------------
    Net = BuildNetVgg16.BUILD_NET_VGG16(vgg16_npy_path=model_path)  # Create class instance for the net
    Net.build(image)  # Build net and load initial weights (weights before training)
    sess = tf.Session()  # Start TensorFlow session
    sess.run(tf.global_variables_initializer())
    #--------------------------------Get activation maps for layers 1-5 for the image--------------
    [cv11, cv12, cv21, cv22, cv31, cv32, cv33, cv41, cv42, cv43, cv51, cv52, cv53] = sess.run(
        [Net.conv1_1, Net.conv1_2, Net.conv2_1, Net.conv2_2, Net.conv3_1, Net.conv3_2, Net.conv3_3,
         Net.conv4_1, Net.conv4_2, Net.conv4_3, Net.conv5_1, Net.conv5_2, Net.conv5_3],
        feed_dict={image: np.expand_dims(Image, axis=0)})
    #-----------------------------Concatenate activation maps into one large matrix for the image---
    ConIm = np.zeros((Sy, Sx, 0))
    Lr = []
    Lr.append(0)
    tml = np.squeeze(cv11)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    tml = np.squeeze(cv12)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    Lr.append(ConIm.shape[2])
    tml = np.squeeze(cv21)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    tml = np.squeeze(cv22)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    Lr.append(ConIm.shape[2])
    tml = np.squeeze(cv31)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    tml = np.squeeze(cv32)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    tml = np.squeeze(cv33)
    ConIm = np.concatenate((ConIm, cv2.resize(tml / tml.max(), (Sx, Sy))), axis=2)
    Lr.append(ConIm.shape[2])
    #-----------Create threads; each thread displays one feature activation map in one RGB channel-----------
    DispImg = np.zeros((Sy, Sx, 3), dtype=np.float32)  # Image to be displayed
    Pos = np.zeros(3, dtype=np.float32)    # Position of thread (the intensity at which its activation map is displayed)
    Rate = np.zeros(3, dtype=np.float32)   # Rate of change in the thread intensity
    Mx = np.zeros(3, dtype=np.float32)     # Normalizing factor for the activation
    AcMap = np.zeros([3, Sy, Sx], dtype=np.float32)  # Activation map used by each thread
    #-----------------------------Create animation--------------------------------------------------
    for itr in range(NumFrame):
        print(itr)
        for i in range(3):
            # If a thread reaches max intensity, start decreasing the intensity of its feature map
            if Pos[i] >= 255:
                Pos[i] = 255
                Rate[i] = -np.abs(Rate[i])
            if Pos[i] <= 0:  # If a thread reaches zero intensity, replace the feature map it displays
                Pos[i] = 0
                Rate[i] = np.random.rand() * 7 + 0.2  # Choose intensity change rate
                Ly = np.random.randint(1, len(Lr) - 1)  # Choose layer
                AcMap[i] = ConIm[:, :, np.random.randint(Lr[Ly - 1], Lr[Ly] + 1)]  # Choose activation map
                Mx[i] = 2.0 / AcMap[i].max()
            # Create the frame from the combination of the activation maps used by each thread
            DispImg[:, :, i] = np.uint8(Mx[i] * AcMap[i] * Pos[i])
            Pos[i] += Rate[i]
        VidOut.write(np.uint8(DispImg * 1 + Image * 0))  # Add frame to video
    VidOut.release()  # Close video
    print("Done")
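# VidOut, NumFrame, Sx, and Sy are module-level globals in these animation scripts and their
# definitions are not shown here. As an assumed example only, the video writer fed by
# VidOut.write() above could be created with OpenCV as follows; the codec, frame rate, and
# output file name are illustrative choices, not taken from the original:
import cv2

fourcc = cv2.VideoWriter_fourcc(*'XVID')              # Codec for an .avi output file
VidOut = cv2.VideoWriter("ActivationAnimation.avi",   # Hypothetical output file name
                         fourcc, 24.0, (Sx, Sy))      # 24 fps, frame size (width=Sx, height=Sy)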