def evaluate(model=None,
             inp_images=None,
             annotations=None,
             checkpoints_path=None,
             epoch=None):
    # Finished implementation of the evaluate function in keras_segmentation.predict
    # Input: arrays of image paths or numpy arrays
    if model is None and checkpoints_path is not None:
        model = model_from_checkpoint_path(checkpoints_path, epoch)

    ious = []
    pred_time = []
    for inp, ann in zip(inp_images, annotations):
        t_start = time.time()
        pr = predict_fast(model, inp)
        t_end = time.time()
        pred_time.append(t_end - t_start)
        print('Prediction time: ', t_end - t_start)

        gt = get_segmentation_arr(ann,
                                  model.n_classes,
                                  model.output_width,
                                  model.output_height,
                                  no_reshape=True)
        gt = gt.argmax(-1)
        iou = metrics.get_iou(gt, pr, model.n_classes)
        ious.append(iou)

    print("Class wise IoU ", np.mean(ious, axis=0))
    print("Total  IoU ", np.mean(ious))
    print("Median prediction time:", np.median(pred_time))
Example #2
def evaluate(model=None,
             inp_images=None,
             annotations=None,
             checkpoints_path=None,
             epoch=None,
             visualize=False,
             output_folder=''):
    # Finished implementation of the evaluate function in keras_segmentation.predict
    # Input: arrays of image paths or numpy arrays
    if model is None and checkpoints_path is not None:
        model = model_from_checkpoint_path(checkpoints_path, epoch)

    ious = []
    pred_time = []
    for inp, ann in tqdm(zip(inp_images, annotations)):
        t_start = time.time()
        pr = predict_fast(model, inp)
        t_end = time.time()
        pred_time.append(t_end - t_start)
        #print('Prediction time: ', pred_time)

        gt = get_segmentation_arr(ann,
                                  model.n_classes,
                                  model.output_width,
                                  model.output_height,
                                  no_reshape=True)
        gt = gt.argmax(-1)
        iou = metrics.get_iou(gt, pr, model.n_classes)
        ious.append(iou)

        if visualize:
            fig = vis_pred_vs_gt_separate(inp, pr, gt)
            plt.title("Predicted mask and errors. "
                      "IOU (bg, crop, lane): " + str(iou))
            if not output_folder:
                # Save next to the checkpoints when no output folder is given.
                epoch_str = '' if epoch is None else str(epoch)
                out_path = (checkpoints_path + epoch_str + '_IOU_' +
                            os.path.basename(inp))
                print('Saving to: ', out_path)
                fig.savefig(out_path)
            else:
                fig.savefig(
                    os.path.join(
                        output_folder,
                        os.path.basename(checkpoints_path) + '_IOU_' +
                        os.path.basename(inp)))
    print(ious)
    ious = np.array(ious)
    print("Class wise IoU ", np.mean(ious, axis=0))
    print("Total  IoU ", np.mean(ious))
    print("Mean prediction time:", np.mean(pred_time))
Example #3
import time
import numpy as np
import matplotlib.pyplot as plt
#from scipy.misc import imresize
from PIL import Image
from keras_segmentation import metrics  # assumed source of get_iou, based on usage below

# Temporary code with one image; will be folded back into the evaluate function when done.
# TODO: use separate colormaps for error images and masks instead of RGB encoding.

labels = [0, 1, 2]

gt = np.load('gt_array.npy')
pr = np.load('pr_array.npy')
inp = r'Frogn_Dataset/images_prepped_test/20190913_LR1_S_0420.png'

image_mode = 'F'
input_image = Image.open(inp)
iou = metrics.get_iou(gt, pr, 3)
ious = []
ious.append(iou)

#visualization

fig = plt.figure()
plt.title("IOU:" + str(iou))
ax1 = fig.add_subplot(2, 2, 1)
ax1.imshow(gt - pr)
#ax1.colorbar()
ax1.title.set_text("Difference GT-pred")

ax2 = fig.add_subplot(2, 2, 2)
ax2.imshow(gt)
ax2.title.set_text('GT')
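
# A possible continuation of the temporary visualization above (not in the original
# snippet): fill the remaining panels with the prediction and the input image, then
# save the figure under a placeholder filename.
ax3 = fig.add_subplot(2, 2, 3)
ax3.imshow(pr)
ax3.title.set_text('Prediction')

ax4 = fig.add_subplot(2, 2, 4)
ax4.imshow(input_image)
ax4.title.set_text('Input image')

fig.savefig('iou_visualization.png')  # placeholder output path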
Example #4
def evaluate_camvid():
    # get seg (ground truth) and compare with pr (prediction)
    # Get seg
    segs_path = "./dataset3/label/test/"
    segs = glob.glob(os.path.join(segs_path, "*.png"))

    #print(len(segs))

    imglabels = np.ndarray((len(segs), 2710, 3384), dtype=np.uint8)
    for i, imgpath in enumerate(segs):
        pic_name = os.path.basename(imgpath)
        labelpath = "dataset3/label/" + pic_name.split('.')[0] + '.png'

        label = load_img(labelpath, grayscale=True, target_size=[2710, 3384])  # grayscale = False
        label = img_to_array(label)
        imglabels[i] = label[:, :, 0]
        if i % 100 == 0:
            print('Creating testing images: {0}/{1} images'.format(i, len(segs)))
    #np.save('./segs_test.npy', imglabels)


    original_w = 3384
    original_h = 2710
    n_classes = 12

    ious = []
    precisions = []
    recalls = []

    for i, pr in enumerate(tqdm(all_prs)):
        gt = imglabels[i]
        pr = cv2.resize(pr, (original_w, original_h), interpolation=cv2.INTER_NEAREST)
        # IoU
        iou = metrics.get_iou(gt, pr, n_classes)
        ious.append(iou)

        # precision
        precision = metrics.get_precision(gt, pr, n_classes)
        precisions.append(precision)

        # recall
        recall = metrics.get_recall(gt, pr, n_classes)
        recalls.append(recall)

    ious = np.array(ious)
    precisions = np.array(precisions)
    recalls = np.array(recalls)

    #print("Class wise IoU Class:{:d} / IoU:{:.2f}\n".format(11, np.mean(ious, axis=0)[11]))

    #print("Class wise Precision Class:{:d} / Precision:{:.2f}\n".format(11, np.mean(precisions, axis=0)[11]))

    #print("Class wise Recall Class:{:d} / Recall:{:.2f}\n".format(11, np.mean(recalls, axis=0)[11]))

    print("Class wise IoU ", np.mean(ious, axis=0))
    print("Total IoU ", np.mean(ious))
# Save each result as a separate image

for ind, fname in enumerate(inp_names):
    #Load images
    inp = os.path.join(image_folder, fname)
    ann = os.path.join(annotations_folder, fname)
    #Run prediction
    pr = predict_fast(model, inp)
    gt = get_segmentation_arr(ann,
                              model.n_classes,
                              model.output_width,
                              model.output_height,
                              no_reshape=True)
    gt = gt.argmax(-1)
    iou = metrics.get_iou(gt, pr, model.n_classes)

    # -- Make overlay image
    im_overlay = vis_pred_vs_gt_overlay(
        inp, pr, gt, figure_width_mm=85)  # check column width for CASE
    plt.imsave(output_name + '_' + os.path.basename(inp), im_overlay)
    np.savetxt('IOU_' + os.path.basename(inp) + '.txt', iou)
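
# A minimal sketch of what an overlay helper such as vis_pred_vs_gt_overlay might
# do, assuming inp is an image path and pr/gt are label maps at the prediction
# resolution. This is a hypothetical illustration, not the original function.
def overlay_pred_errors(inp_path, pr, gt, alpha=0.5):
    import cv2
    img = cv2.cvtColor(cv2.imread(inp_path), cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (pr.shape[1], pr.shape[0]), interpolation=cv2.INTER_LINEAR)
    overlay = img.copy()
    overlay[pr != gt] = (255, 0, 0)  # highlight pixels where prediction disagrees with GT
    return cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)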

#All in one figure
'''
fig,ax = plt.subplots(nrows=len(inp_names),ncols=2,figsize=(figure_width_mm/25.4,figure_height_mm/25.4))
for ind,fname in enumerate(inp_names):
    #Load images
    inp = os.path.join(image_folder,fname)
    ann = os.path.join(annotations_folder,fname)
    #Run prediction