Example No. 1
def get_iou(prediction, ns, i, write_pred=True):
    print(ns[i][0])
    img_name = (ns[i][0].split('/')[-1]).split('.')[0] + '/'
    cur_dir = OUTPUT_DIR + img_name
    os.mkdir(cur_dir)
    prob = cv2.resize(prediction, (orig_width, orig_height))
    orig_img = cv2.imread(ns[i][0])
    cv2.imwrite('temp.jpg', prob)
    p = cv2.imread('temp.jpg')
    mask_orig = cv2.imread(ns[i][1])
    # visualize(orig_img, 255 * prob, mask_orig)
    pred, orig = get_pred_orig_grayscale(p, mask_orig)

    if write_pred:
        # get and write transparent mask for predicted image
        output_pred = get_transparent_prediction(orig_img, p, alpha=0.5, orig=False)
        cv2.imwrite(cur_dir + str(i) + '_' + ns[i][1].split('/')[-1], output_pred)

        # get and write transparent mask for ground truth
        output = get_transparent_prediction(orig_img, mask_orig, alpha=0.5, orig=True)
        cv2.imwrite(cur_dir + 'orig_' + ns[i][1].split('/')[-1], output)
        cv2.imwrite(cur_dir + 'mask_pred_' + ns[i][1].split('/')[-1], 255 * pred)
        cv2.imwrite(cur_dir + 'mask_orig_' + ns[i][1].split('/')[-1], 255 * orig)

    return metric.pixel_accuracy(pred, orig)
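
A hypothetical driver for the function above (not part of the original snippet): it assumes ns is a list of (image_path, mask_path) pairs and predictions holds one prediction array per entry; all names here are illustrative only.

import numpy as np

# Assumed setup: `ns` is a list of (image_path, mask_path) pairs and
# `predictions` contains one prediction per entry (hypothetical names).
accuracies = []
for i, prediction in enumerate(predictions):
    acc = get_iou(prediction, ns, i, write_pred=True)
    accuracies.append(acc)
print("Mean pixel accuracy: {0}".format(np.mean(accuracies)))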
Example No. 2
    # Define IoU metric
    y_true = Y_true_all[i, :, :, :1]
    y_true = np.squeeze(y_true)
    y_true = resize(y_true, (512, 512), mode='constant', preserve_range=True)
    thresh = threshold_otsu(y_true)
    y_true = y_true > thresh
    # y_true = y_true / 255
    y_pred = Y_pre_all[i, :, :, :1]
    y_pred = np.squeeze(y_pred)

    print(y_true.shape, y_pred.shape)

    meanIOU = mean_IU(y_pred, y_true)
    print "meanIOU:"
    print meanIOU
    ac = pixel_accuracy(y_pred, y_true)
    print 'Pixel ACC:'
    print ac

    mean_acc = mean_accuracy(y_pred, y_true)
    print "Mean ACC:"
    print mean_acc

    dice = dice_coef(y_pred, y_true)
    print "Dice:"
    print dice
    write_data = [str(meanIOU), str(ac), str(mean_acc), str(dice)]
    csv_writer.writerow(write_data)
    mean_IOU_pool.append(meanIOU)
    Pixel_ACC_pool.append(ac)
    Mean_ACC_pool.append(mean_acc)
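
The dice_coef helper used above is not shown. A minimal sketch of the Dice coefficient for binary masks, assuming that is what the helper computes:

import numpy as np

def dice_coef(y_pred, y_true):
    # Dice coefficient for binary masks: 2 * |A and B| / (|A| + |B|)
    y_pred = np.asarray(y_pred, dtype=bool)
    y_true = np.asarray(y_true, dtype=bool)
    intersection = np.logical_and(y_pred, y_true).sum()
    denom = y_pred.sum() + y_true.sum()
    if denom == 0:
        return 1.0  # both masks empty: treat as perfect agreement
    return 2.0 * intersection / denom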
Example No. 3
def main():
    parser = \
        argparse.ArgumentParser(
            prog='parser',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            description=textwrap.dedent('''
    **************************************************************
                       Result Evaluation
                    -----------------------
    The path to Result and Reference Pictures is user-defined path.
    **************************************************************
                                     ''')
                                     )

    parser.add_argument('--result_image_path', type=str,
                        help='the path to the result images')
    parser.add_argument('--ref_image_path', type=str,
                        help='the path to the davis reference path')

    args = parser.parse_args()

    result_image_path = args.result_image_path
    if not result_image_path:
        raise Exception("No result image path was given on the command line "
                        "or in the environment arguments")

    ref_image_path = args.ref_image_path
    if not ref_image_path:
        raise Exception("No reference image path was given on the command "
                        "line or in the environment arguments")

    result_imgs = []
    ref_imgs = []
    for a, b, result_files in os.walk(result_image_path):
        result_imgs = result_files

    for a, b, ref_files in os.walk(ref_image_path):
        ref_imgs = ref_files


    # if len(result_imgs) != len(ref_imgs):
    #     raise Exception("The number of result and reference images does not match. Cannot evaluate.")

    print("+", "-".center(85, '-'), "+")
    print("|",  "pixel_accuracy".center(20), "mean_IU".center(20),
          "mean_accuracy".center(20), "frequency_weighted_IU".center(20), "|".rjust(2))
    result_image_filenames = os.listdir(result_image_path)
    ref_image_filenames = os.listdir(ref_image_path)

    image_converter = ImageConverter()
    each_pixel_accuracy = []
    each_mean_IU = []
    each_mean_accuracy = []
    each_frequency_weighted_IU = []
    for i in range(len(result_imgs)):
        print("filename:", result_image_filenames[i])
        result_imgs[i] = image_converter.image_to_array(os.path.join(result_image_path, result_image_filenames[i]))
        ref_imgs[i] = image_converter.image_to_array(os.path.join(ref_image_path, ref_image_filenames[i]))

        i_pixel_accuracy = pixel_accuracy(result_imgs[i], ref_imgs[i])
        # i_mean_IU = mean_IU(result_imgs[i], ref_imgs[i])
        i_mean_IU = db_eval_iou(ref_imgs[i], result_imgs[i])
        i_mean_accuracy = mean_accuracy(result_imgs[i], ref_imgs[i])
        i_frequency_weighted_IU = frequency_weighted_IU(result_imgs[i], ref_imgs[i])

        each_pixel_accuracy.append(i_pixel_accuracy)
        each_mean_IU.append(i_mean_IU)
        each_mean_accuracy.append(i_mean_accuracy)
        each_frequency_weighted_IU.append(i_frequency_weighted_IU)

        print("|",
        str(i_pixel_accuracy).center(20), str(i_mean_IU).center(20),
              str(i_mean_accuracy).center(20), str(i_frequency_weighted_IU).center(20), "|".rjust(2))

    print("| mean value:",
          str(np.mean(each_pixel_accuracy)).center(12),
          str(np.mean(each_mean_IU)).center(20),
          str(np.mean(each_mean_accuracy)).center(20),
          str(np.mean(each_frequency_weighted_IU)).center(20),"|".rjust(2))
    print("+", '-'.center(85, '-'), "+")
Example No. 4
    def testFourClasses1(self):
        segm = np.array([[1,2,3,0,0], [0,0,0,0,0]])
        gt   = np.array([[1,0,0,0,0], [0,0,0,0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, (7.0+1.0)/(9.0+1.0))
Example No. 5
    def testFiveClasses0(self):
        segm = np.array([[1,2,3,4,3], [0,0,0,0,0]])
        gt   = np.array([[1,0,3,0,0], [0,0,0,0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, (5.0+1.0+1.0)/(8.0+1.0+1.0))
Example No. 6
    def testTwoClasses1(self):
        segm = np.array([[1,0,0,0,0], [0,0,0,0,0]])
        gt   = np.array([[0,0,0,0,0], [0,0,0,0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, (9.0)/(10.0))
Example No. 7
    def testThreeClasses1(self):
        segm = np.array([[0,2,0,0,0], [0,0,0,0,0]])
        gt   = np.array([[1,0,0,0,0], [0,0,0,0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, (8.0+0.0)/(9.0+1.0))
Example No. 8
    def testTwoClasses0(self):
        segm = np.array([[1,1,1,1,1], [1,1,1,1,1]])
        gt   = np.array([[0,0,0,0,0], [0,0,0,0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, 0)
Example No. 9
    def testOneClass(self):
        segm = np.array([[0,0], [0,0]])
        gt   = np.array([[0,0], [0,0]])

        res = es.pixel_accuracy(segm, gt)
        self.assertEqual(res, 1.0)
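
The expected values in these tests match the standard pixel-accuracy definition, sum_i n_ii / sum_i t_i, taken over the classes present in the ground truth. A minimal reference sketch that reproduces the expected values (not necessarily the actual es implementation):

import numpy as np

def pixel_accuracy(segm, gt):
    # Correctly labelled pixels summed over GT classes, divided by total GT pixels.
    segm = np.asarray(segm).ravel()
    gt = np.asarray(gt).ravel()
    correct = 0
    labelled = 0
    for c in np.unique(gt):
        gt_c = gt == c
        correct += np.logical_and(gt_c, segm == c).sum()
        labelled += gt_c.sum()
    return correct / float(labelled)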
Example No. 10
        p = cv2.imread('temp.jpg')
        mask_orig = cv2.imread(ns[i][1])

        # visualize(orig_img, 255 * prob, mask_orig)

        # get and write transparent mask for predicted image
        output_pred = get_transparent_prediction(orig_img, p, alpha=0.5, orig=False)
        cv2.imwrite(cur_dir + str(i) + '_' + ns[i][1].split('/')[-1], output_pred)

        # get and write transparent mask for ground truth
        output = get_transparent_prediction(orig_img, mask_orig, alpha=0.5, orig=True)
        cv2.imwrite(cur_dir + 'orig_' + ns[i][1].split('/')[-1], output)
        pred, orig = get_pred_orig_grayscale(p, mask_orig)
        cv2.imwrite(cur_dir + 'mask_pred_' + ns[i][1].split('/')[-1], 255 * pred)
        cv2.imwrite(cur_dir + 'mask_orig_' + ns[i][1].split('/')[-1], 255 * orig)
        iou = metric.pixel_accuracy(pred, orig)
        if iou != -1:
            pixel_accuracy.append(iou)
        print("MEAN accuracy: {0}".format(iou))

        # for original submission
        mask = prob > threshold
        rle = run_length_encode(mask)
        rles.append(rle)
        i += 1

print "Average pixel accuracy: {0}".format(np.sum(pixel_accuracy) / len(pixel_accuracy))
print "number of defected images: {0}".format(len(pixel_accuracy))

print("Generating submission file...")  
df = pd.DataFrame({'img': names, 'rle_mask': rles})
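
run_length_encode is not defined in this snippet. A common Kaggle-style sketch is shown below; the 1-indexed, column-major convention is an assumption and may differ from the original helper:

import numpy as np

def run_length_encode(mask):
    # Encode a binary mask as 1-indexed "start length start length ..." pairs.
    pixels = np.asarray(mask, dtype=np.uint8).flatten(order='F')  # column-major, assumed
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1   # run boundaries
    runs[1::2] -= runs[0::2]                            # convert end positions to lengths
    return ' '.join(str(x) for x in runs)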
Example No. 11
    cls_5 = pred == 5
    cls_6 = pred == 6
    cls_7 = pred == 7
    cls_8 = pred == 8
    pred[cls_3] = 1
    pred[cls_2] = 2
    pred[cls_4] = 2
    pred[cls_5] = 2
    pred[cls_6] = 2
    pred[cls_7] = 2
    pred[cls_8] = 2
    """
    print('unique label after is {}'.format(np.unique(label)))
    print('unique pred after is {}'.format(np.unique(pred)))

    pa = eval_segm.pixel_accuracy(pred, label)
    ma = eval_segm.mean_accuracy(pred, label)
    rate_precision_mean = eval_segm.mean_precision(pred, label)
    m_iu, iu = eval_segm.mean_IU(pred, label)
    fw_iu = eval_segm.frequency_weighted_IU(pred, label)
    pa_list.append(pa)
    ma_list.append(ma)
    list_rate_precision_mean.append(rate_precision_mean)
    iu_list.append(iu)
    m_iu_list.append(m_iu)
    fw_iu_list.append(fw_iu)
    num_true_positives += eval_segm.get_num_true_positives(pred, label)
    num_false_positives += eval_segm.get_num_false_positives(pred, label)

    counts.append(count)
    print(np.array(count), np.array(gt_count[_name]))
# create lists to hold the metric values
    
list_pixel_acc = []
list_mean_acc = []
list_mean_IU = []
list_fpr = []
list_f1_score = []
list_recall = []
list_precision = []

# write the results to the result CSV file
with open('{} result {}.csv'.format(classes, post_processing_method), 'w') as fsave:
    for idx in range(len(true_label_img)):
        
        pixel_acc = eval_segm.pixel_accuracy(predicted_label_img[idx], true_label_img[idx])
        list_pixel_acc.append(pixel_acc)
        mean_acc = eval_segm.mean_accuracy(predicted_label_img[idx], true_label_img[idx])
        list_mean_acc.append(mean_acc)
        mean_IU = eval_segm.mean_IU(predicted_label_img[idx], true_label_img[idx])
        list_mean_IU.append(mean_IU)
        pred_segm = predicted_label_img[idx].copy()
        gt_segm = true_label_img[idx].copy()
        fpr = eval_segm.get_fpr(pred_segm, gt_segm)
        precision, recall, f1 = eval_segm.get_all(pred_segm, gt_segm)
        list_f1_score.append(f1)
        list_precision.append(precision)
        list_recall.append(recall)
        list_fpr.append(fpr)
        
# =============================================================================
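
get_fpr and get_all are project-specific helpers that are not shown. A minimal sketch of precision, recall and F1 for a single foreground class (an assumption about what get_all returns):

import numpy as np

def get_all(pred_segm, gt_segm, positive_class=1):
    # Precision, recall and F1 over one assumed foreground class of the masks.
    pred_pos = np.asarray(pred_segm) == positive_class
    gt_pos = np.asarray(gt_segm) == positive_class
    tp = np.logical_and(pred_pos, gt_pos).sum()
    fp = np.logical_and(pred_pos, ~gt_pos).sum()
    fn = np.logical_and(~pred_pos, gt_pos).sum()
    precision = tp / float(tp + fp) if (tp + fp) else 0.0
    recall = tp / float(tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    return precision, recall, f1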