Example 1
def cal_acc(data_list, pred_folder, derain_folder, classes, names,
            result_txt_path):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    psnr_meter = AverageMeter()
    ssim_meter = AverageMeter()

    for i, (image_path, rain_image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        derain_pred = cv2.imread(
            os.path.join(derain_folder, image_name + '.png'))
        clear_target = cv2.imread(image_path)
        psnr, ssim = caculate_psnr_ssim(derain_pred, clear_target)

        seg_pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                              cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            seg_pred, target, classes)

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        psnr_meter.update(psnr)
        ssim_meter.update(ssim)

        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, psnr {3:.4f}, ssim {4:.4f}, accuracy {5:.4f}.'
            .format(i + 1, len(data_list), image_name + '.png', psnr, ssim,
                    accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    PSNR = psnr_meter.avg
    SSIM = ssim_meter.avg

    logger.info('Eval result: PSNR/SSIM {:.4f}/{:.4f}.'.format(PSNR, SSIM))
    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))

    with open(result_txt_path, 'w') as result_file:
        result_file.writelines(
            'Eval result: PSNR/SSIM {:.4f}/{:.4f}.\n'.format(PSNR, SSIM))
        result_file.writelines(
            'Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.\n'.format(
                mIoU, mAcc, allAcc))
        for i in range(classes):
            result_file.writelines(
                'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.\n'.
                format(i, iou_class[i], accuracy_class[i], names[i]))
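
Note: every cal_acc/test variant in this listing relies on the same two helpers, AverageMeter and intersectionAndUnion, which are never shown. The sketch below is a minimal assumption of what they look like, modeled on the common semseg-style evaluation utilities; the exact implementations behind each example may differ slightly.

import numpy as np


class AverageMeter(object):
    """Tracks the latest value and a running sum / count / average."""

    def __init__(self):
        self.val, self.sum, self.count, self.avg = 0, 0, 0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def intersectionAndUnion(output, target, K, ignore_index=255):
    """Per-class intersection, union and target areas for K-class label maps."""
    assert output.shape == target.shape
    output = output.reshape(-1).copy()
    target = target.reshape(-1)
    # Push ignored pixels outside the histogram range (assumes ignore_index >= K).
    output[target == ignore_index] = ignore_index
    intersection = output[output == target]
    area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
    area_output, _ = np.histogram(output, bins=np.arange(K + 1))
    area_target, _ = np.histogram(target, bins=np.arange(K + 1))
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target

With helpers like these, the running per-image accuracy in the examples is sum(intersection) / sum(target area), and the final per-class IoU is intersection_meter.sum / union_meter.sum.
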
Example 2
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Example 3
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    colors = np.loadtxt(args.colors_path).astype('uint8')
    names = [line.rstrip('\n') for line in open(args.names_path)]

    for i, (image_path, target_path) in enumerate(data_list):
        fd_name = image_path.split('/')[-3]
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)

        # Save a colorized copy of the ground truth for visual inspection.
        color = colorize(target, colors)
        color_path = os.path.join(
            '/local/xjqi/2ddata/tasks/semseg_50_bn/exp/ade20k/pspnet50/result/epoch_100/val/ss/gt/',
            fd_name + "_" + image_name + '.png')
        color.save(color_path)

        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        # logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
        #     i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
Example 4
def cal_acc(data_list, pred_folder, classes, names, datasetname):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        scene_name = image_path.split('/')[-3]
        pred = cv2.imread(
            os.path.join(pred_folder, scene_name + '_' + image_name + '.png'),
            cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(
                i + 1, len(data_list), image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)

    if datasetname == 'scannet':
        # ScanNet evaluates only 20 of the 40 categories, so average over the valid ones only.
        mIoU = 0.0  #np.mean(iou_class)
        mAcc = 0.0  #np.mean(accuracy_class)
        category_cnt = 0
        for i in range(iou_class.shape[0]):
            if i in [
                    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33,
                    34, 36, 39
            ]:
                mIoU += iou_class[i]
                mAcc += accuracy_class[i]
                category_cnt += 1
        mIoU /= category_cnt
        mAcc /= category_cnt

    else:
        # For datasets other than ScanNet, average over all categories.
        mIoU = np.mean(iou_class)
        mAcc = np.mean(accuracy_class)

    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
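
Note on the ScanNet branch above: the same averaging over the 20 evaluated categories can be written with NumPy indexing instead of an explicit loop. A minimal equivalent sketch (the index list is copied from the example; the helper name is illustrative, not from the original):

import numpy as np

# The 20 category indices the example treats as valid for ScanNet.
SCANNET_VALID_CLASSES = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39
]


def mean_over_valid(iou_class, accuracy_class, valid=SCANNET_VALID_CLASSES):
    """Average per-class IoU/accuracy over the evaluated categories only."""
    valid = np.asarray(valid)
    return iou_class[valid].mean(), accuracy_class[valid].mean()
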
Example 5
def cal_acc(data_list,
            pred_folder,
            classes,
            names,
            is_med=False,
            label_mapping=None):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = get_image_name(target_path, is_med)
        pred_fp = os.path.join(pred_folder, image_name)
        # print('pred : %s \n target : %s' %(pred_fp, target_path))
        pred = cv2.imread(pred_fp, cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        if label_mapping is not None:
            target = convert_label(target, label_mapping)
        # print('pred: %s target: %s' %(np.unique(pred), np.unique(target)))

        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        dice = sum(intersection_meter.val[1:]) * 2 / (
            sum(intersection_meter.val[1:]) + sum(union_meter.val[1:]) + 1e-10)
        logger.info(
            'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}, dice {4:.4f}.'.
            format(i + 1, len(data_list), image_name, accuracy, dice))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    dice_class = intersection_meter.sum * 2 / (union_meter.sum +
                                               intersection_meter.sum + 1e-10)
    mDice = np.mean(dice_class[1:])
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info(
        'Eval result: mIoU/mAcc/allAcc/mDice {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.
        format(mIoU, mAcc, allAcc, mDice))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy/dice {:.4f}/{:.4f}/{:.4f}, name: {}.'
            .format(i, iou_class[i], accuracy_class[i], dice_class[i],
                    names[i]))
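
Note on the Dice score above: it relies on the identity |A| + |B| = |A union B| + |A intersect B|, so 2*I / (U + I) equals the usual 2*|A intersect B| / (|A| + |B|). A small check on toy arrays, assuming the intersectionAndUnion sketch shown after Example 1:

import numpy as np

# Toy 1-D prediction and ground truth with 3 classes (class 0 acts as background).
pred = np.array([0, 1, 1, 2, 2, 2])
target = np.array([0, 1, 2, 2, 2, 1])

area_i, area_u, area_t = intersectionAndUnion(pred, target, 3)
area_pred, _ = np.histogram(pred, bins=np.arange(4))

dice_from_union = area_i * 2 / (area_u + area_i + 1e-10)     # form used in the example
dice_from_areas = area_i * 2 / (area_pred + area_t + 1e-10)  # 2|A∩B| / (|A| + |B|)
assert np.allclose(dice_from_union, dice_from_areas)
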
Example 6
def cal_acc(data_list, pred_folder, classes, names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    from sotabencheval.semantic_segmentation import ADE20KEvaluator
    evaluator = ADE20KEvaluator(model_name='PSPNet (ResNet-50)',
                                paper_arxiv_id='1612.01105')

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        pred = cv2.imread(os.path.join(pred_folder, image_name + '.png'),
                          cv2.IMREAD_GRAYSCALE)
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        intersection, union, target = intersectionAndUnion(
            pred, target, classes)
        cache_exists = evalintersectionAndUnion(pred, target, classes,
                                                evaluator)

        if cache_exists:
            break

        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(
            intersection_meter.val) / (sum(target_meter.val) + 1e-10)

    if cache_exists:
        evaluator.save()
        return

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    print('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        print('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
            i, iou_class[i], accuracy_class[i], ''))
Example 7
def cal_acc(data_list, pred_folder, classes, names, pred_path_list=None, target_path_list=None):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    if args.test_in_nyu_label_space:
        classes = 41

    for i, (image_path, target_path) in enumerate(data_list):
        image_name = image_path.split('/')[-1].split('.')[0]
        if pred_path_list is not None:
            pred_path = pred_path_list[i]
        else:
            pred_path = os.path.join(pred_folder, image_name+'.png')
        pred = cv2.imread(pred_path, cv2.IMREAD_GRAYSCALE)
        if target_path_list is not None:
            target_path = target_path_list[i]
        target = cv2.imread(target_path, cv2.IMREAD_GRAYSCALE)
        if i < 10:
            print(pred_path, target_path)

        intersection, union, target = intersectionAndUnion(pred, target, classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        accuracy = sum(intersection_meter.val) / (sum(target_meter.val) + 1e-10)
        logger.info('Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.format(i + 1, len(data_list), image_name+'.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(mIoU, mAcc, allAcc))
    for i in range(classes):
        print(len(iou_class), len(accuracy_class), len(names))
        logger.info('Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(i, iou_class[i], accuracy_class[i], names[i]))
Example 8
def test(model, criterion, names):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    rooms = sorted(os.listdir(args.train_full_folder))
    rooms_split = [
        room for room in rooms if 'Area_{}'.format(args.test_area) in room
    ]
    gt_all, pred_all = np.array([]), np.array([])
    check_makedirs(args.save_folder)
    pred_save, gt_save = [], []
    for idx, room_name in enumerate(rooms_split):
        data_room, label_room, index_room, gt = data_prepare(
            os.path.join(args.train_full_folder, room_name))
        batch_point = args.num_point * args.test_batch_size
        batch_num = int(np.ceil(label_room.size / batch_point))
        end = time.time()
        output_room = np.array([])
        for i in range(batch_num):
            s_i, e_i = i * batch_point, min((i + 1) * batch_point,
                                            label_room.size)
            input, target, index = data_room[
                s_i:e_i, :], label_room[s_i:e_i], index_room[s_i:e_i]
            input = torch.from_numpy(input).float().view(
                -1, args.num_point, input.shape[1])
            target = torch.from_numpy(target).long().view(-1, args.num_point)
            with torch.no_grad():
                output = model(input.cuda())
            loss = criterion(output, target.cuda())  # for reference
            output = output.transpose(1, 2).contiguous().view(
                -1, args.classes).data.cpu().numpy()
            pred = np.argmax(output, axis=1)
            intersection, union, target = intersectionAndUnion(
                pred,
                target.view(-1).data.cpu().numpy(), args.classes,
                args.ignore_label)
            accuracy = sum(intersection) / (sum(target) + 1e-10)
            output_room = np.vstack([output_room, output
                                     ]) if output_room.size else output
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % args.print_freq == 0) or (i + 1 == batch_num):
                logger.info(
                    'Test: [{}/{}]-[{}/{}] '
                    'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss {loss:.4f} '
                    'Accuracy {accuracy:.4f} '
                    'Points {gt.size}.'.format(idx + 1,
                                               len(rooms_split),
                                               i + 1,
                                               batch_num,
                                               batch_time=batch_time,
                                               loss=loss,
                                               accuracy=accuracy,
                                               gt=gt))
        '''
        unq, unq_inv, unq_cnt = np.unique(index_room, return_inverse=True, return_counts=True)
        index_array = np.split(np.argsort(unq_inv), np.cumsum(unq_cnt[:-1]))
        output_room = np.vstack([output_room, np.zeros((1, args.classes))])
        index_array_fill = np.array(list(itertools.zip_longest(*index_array, fillvalue=output_room.shape[0] - 1))).T
        pred = output_room[index_array_fill].sum(1)
        pred = np.argmax(pred, axis=1)
        '''
        pred = np.zeros((gt.size, args.classes))
        for j in range(len(index_room)):
            pred[index_room[j]] += output_room[j]
        pred = np.argmax(pred, axis=1)

        # calculation 1: add per room predictions
        intersection, union, target = intersectionAndUnion(
            pred, gt, args.classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        # calculation 2
        pred_all = np.hstack([pred_all, pred]) if pred_all.size else pred
        gt_all = np.hstack([gt_all, gt]) if gt_all.size else gt
        pred_save.append(pred), gt_save.append(gt)

    with open(
            os.path.join(args.save_folder,
                         "pred_{}.pickle".format(args.test_area)),
            'wb') as handle:
        pickle.dump({'pred': pred_save},
                    handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            os.path.join(args.save_folder,
                         "gt_{}.pickle".format(args.test_area)),
            'wb') as handle:
        pickle.dump({'gt': gt_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # calculation 1
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU1 = np.mean(iou_class)
    mAcc1 = np.mean(accuracy_class)
    allAcc1 = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    # calculation 2
    intersection, union, target = intersectionAndUnion(pred_all, gt_all,
                                                       args.classes,
                                                       args.ignore_label)
    iou_class = intersection / (union + 1e-10)
    accuracy_class = intersection / (target + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection) / (sum(target) + 1e-10)
    logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    logger.info('Val1 result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU1, mAcc1, allAcc1))

    for i in range(args.classes):
        logger.info(
            'Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return mIoU, mAcc, allAcc, pred_all
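
Note: the two metric passes at the end of the function above ('calculation 1' and 'calculation 2') agree by construction, because per-class intersection, union, and target counts are additive over rooms; summing per-room counts gives the same totals as recomputing them on the concatenated predictions. A toy check, again assuming the intersectionAndUnion sketch shown after Example 1:

import numpy as np

rooms_pred = [np.array([0, 1, 1, 2]), np.array([2, 2, 0, 1])]
rooms_gt = [np.array([0, 1, 2, 2]), np.array([2, 1, 0, 1])]

# Calculation 1: accumulate per-room counts.
i1 = u1 = t1 = 0
for p, g in zip(rooms_pred, rooms_gt):
    i, u, t = intersectionAndUnion(p, g, 3)
    i1, u1, t1 = i1 + i, u1 + u, t1 + t

# Calculation 2: recompute on the concatenated points.
i2, u2, t2 = intersectionAndUnion(np.hstack(rooms_pred), np.hstack(rooms_gt), 3)

assert np.array_equal(i1, i2) and np.array_equal(u1, u2) and np.array_equal(t1, t2)
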
Example 9
def cal_acc(list_of_list, root_dir, pred_folder, classes, flow_warp, flownet,
            names):
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()
    for list_name in list_of_list:
        list_tem = './data/list/cityscapes/val_sam/' + list_name
        data_list = open(list_tem.strip())
        for i, image_path in enumerate(data_list):
            if i > 28:
                print('done!')
            else:
                image_name = image_path.split('/')[-1].split('.')[0]
                splits = image_path.split('_')
                frame_next = "_".join(splits[:-2] + [(str(int(splits[-2]) +
                                                          1)).rjust(6, "0")] +
                                      splits[-1:])
                # if accel:
                #     bgr_label = cv2.imread(os.path.join(pred_folder, image_name + '.png'))
                #     rgb_label = cv2.cvtColor(bgr_label, cv2.COLOR_BGR2RGB)
                #     pred = color2grey(rgb_label, colors)
                # else:
                pred = cv2.imread(
                    os.path.join(pred_folder, image_name + '.png'),
                    cv2.IMREAD_GRAYSCALE)
                print(os.path.join(pred_folder, image_name + '.png'))
                frame_name_next = frame_next.split('/')[-1].split('.')[0]
                pred_next = cv2.imread(
                    os.path.join(pred_folder, frame_name_next + '.png'),
                    cv2.IMREAD_GRAYSCALE)

                image = cv2.imread(
                    os.path.join(root_dir, image_path.strip()),
                    cv2.IMREAD_COLOR)  # BGR 3-channel ndarray with shape H * W * 3
                image = cv2.cvtColor(
                    image, cv2.COLOR_BGR2RGB)  # convert from BGR to RGB order
                image = np.float32(image)
                image = normalize(image)
                frame = cv2.imread(os.path.join(root_dir, frame_next.strip()),
                                   cv2.IMREAD_COLOR)
                frame = cv2.cvtColor(
                    frame, cv2.COLOR_BGR2RGB
                )  # convert cv2 read image from BGR order to RGB order
                frame = np.float32(frame)
                frame = normalize(frame)
                image_ten = torch.from_numpy(
                    frame.astype(np.float32).transpose(
                        2, 0, 1)).cuda().unsqueeze(0).cuda()
                frame_ten = torch.from_numpy(
                    image.astype(np.float32).transpose(
                        2, 0, 1)).cuda().unsqueeze(0).cuda()
                pred_next_ten = torch.from_numpy(pred_next.astype(
                    np.float32)).cuda().unsqueeze(0).unsqueeze(0).cuda()
                flownet.eval()

                output_flow_filename = os.path.join(root_dir + '/flow_val/',
                                                    image_name + '.flo')
                if not os.path.exists(root_dir + '/flow_val/'):
                    os.makedirs(root_dir + '/flow_val/')
                if not os.path.exists(output_flow_filename):
                    print('lack of flow', output_flow_filename)
                    with torch.no_grad():
                        flow = flownet(frame_ten, image_ten)
                    save_flo(flow, output_flow_filename)
                else:
                    flow = read_flo(output_flow_filename)
                    flow = torch.from_numpy(
                        flow.astype(np.float32).transpose(
                            2, 0, 1)).cuda().unsqueeze(0).cuda()
                # warp_i1 = flow_warp(frame_ten, flow)
                warp_pred = flow_warp(pred_next_ten, flow)

                # noc_mask2 = torch.exp(-1 * torch.abs(torch.sum(image_ten - warp_i1, dim=1))).unsqueeze(1)
                # error=(warp_pred-pred)*noc_mask2

                intersection, union, target = intersectionAndUnion(
                    pred, warp_pred[0][0].cpu().data.numpy().astype(np.uint8),
                    classes)
                intersection_meter.update(intersection)
                union_meter.update(union)
                target_meter.update(target)
                accuracy = sum(
                    intersection_meter.val) / (sum(target_meter.val) + 1e-10)
                logger.info(
                    'Evaluating {0}/{1} on image {2}, accuracy {3:.4f}.'.
                    format(i + 1, 29, image_name + '.png', accuracy))

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    logger.info('Eval result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    for i in range(classes):
        logger.info(
            'Class_{} result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
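
Note: the function above measures temporal consistency of Cityscapes predictions. It warps the prediction of the next frame back to the current frame with optical flow (flow_warp) and scores the IoU between the current prediction and the warped one. The flow_warp helper is passed in from elsewhere and is not shown; a minimal sketch of what a backward bilinear warp typically looks like is given below (an assumption, not the original helper, and the flow sign convention depends on the flow network used):

import torch
import torch.nn.functional as F


def flow_warp(x, flow):
    """Backward-warp tensor x of shape (N, C, H, W) by optical flow (N, 2, H, W)."""
    n, _, h, w = x.shape
    # Base sampling grid in pixel coordinates, channel 0 = x, channel 1 = y.
    yy, xx = torch.meshgrid(torch.arange(h), torch.arange(w), indexing='ij')
    grid = torch.stack((xx, yy), dim=0).float().to(x.device).unsqueeze(0)
    new_grid = grid + flow
    # Normalize to [-1, 1] as expected by grid_sample.
    new_grid[:, 0] = 2.0 * new_grid[:, 0] / max(w - 1, 1) - 1.0
    new_grid[:, 1] = 2.0 * new_grid[:, 1] / max(h - 1, 1) - 1.0
    return F.grid_sample(x, new_grid.permute(0, 2, 3, 1), align_corners=True)
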
Example 10
def main():
    global logger
    logger = get_logger()

    classes = 13
    color_map = np.zeros((classes, 3))
    names = [line.rstrip('\n') for line in open('data/s3dis/s3dis_names.txt')]
    for i in range(classes):
        color_map[i, :] = get_color(i)
    data_root = 'dataset/s3dis/trainval_fullarea'
    data_list = sorted(os.listdir(data_root))
    data_list = [item[:-4] for item in data_list if 'Area_' in item]
    intersection_meter, union_meter, target_meter = AverageMeter(
    ), AverageMeter(), AverageMeter()

    logger.info('<<<<<<<<<<<<<<<<< Start Evaluation <<<<<<<<<<<<<<<<<')
    test_area = [1, 2, 3, 4, 5, 6]
    for i in range(len(test_area)):
        # result_path = os.path.join('exp/s3dis', exp_list[test_area[i]-1], 'result')
        result_path = '/exp/s3dis/6-fold'  # where to save all result files
        # pred_save_folder = os.path.join(result_path, 'best_visual/pred')
        # label_save_folder = os.path.join(result_path, 'best_visual/label')
        # image_save_folder = os.path.join(result_path, 'best_visual/image')
        # check_makedirs(pred_save_folder); check_makedirs(label_save_folder); check_makedirs(image_save_folder)
        with open(
                os.path.join(result_path,
                             'pred_{}'.format(test_area[i]) + '.pickle'),
                'rb') as handle:
            pred = pickle.load(handle)['pred']
        with open(
                os.path.join(result_path,
                             'gt_{}'.format(test_area[i]) + '.pickle'),
                'rb') as handle:
            label = pickle.load(handle)['gt']
        data_split = [
            item for item in data_list
            if 'Area_{}'.format(test_area[i]) in item
        ]
        assert len(pred) == len(label) == len(data_split)
        for j in range(len(data_split)):
            print('processing [{}/{}]-[{}/{}]'.format(i + 1, len(test_area),
                                                      j + 1, len(data_split)))
            # data_name = data_split[j]
            # data = np.load(os.path.join(data_root, data_name + '.npy'))
            # coord, feat = data[:, :3], data[:, 3:6]
            pred_j, label_j = pred[j].astype(np.uint8), label[j].astype(
                np.uint8)
            # pred_j_color, label_j_color = color_map[pred_j, :], color_map[label_j, :]
            # vis_util.write_ply_color(coord, pred_j, os.path.join(pred_save_folder, data_name +'.obj'))
            # vis_util.write_ply_color(coord, label_j, os.path.join(label_save_folder, data_name + '.obj'))
            # vis_util.write_ply_rgb(coord, feat, os.path.join(image_save_folder, data_name + '.obj'))
            intersection, union, target = intersectionAndUnion(
                pred_j, label_j, classes, ignore_index=255)
            intersection_meter.update(intersection)
            union_meter.update(union)
            target_meter.update(target)

    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)
    logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))

    for i in range(classes):
        logger.info(
            'Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Example 11
def test(model, criterion, names):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    data_file = os.path.join(args.data_root,
                             'scannet_{}.pickle'.format(args.split))
    file_pickle = open(data_file, 'rb')
    xyz_all = pickle.load(file_pickle, encoding='latin1')
    label_all = pickle.load(file_pickle, encoding='latin1')
    file_pickle.close()
    gt_all, pred_all = np.array([]), np.array([])
    vox_acc = []
    check_makedirs(args.save_folder)
    pred_save, gt_save = [], []
    for idx in range(len(xyz_all)):
        points, labels = xyz_all[idx], label_all[idx].astype(np.int32)
        gt = labels - 1
        gt[labels == 0] = 255
        data_room, label_room, index_room = data_prepare(points, gt)
        batch_point = args.num_point * args.test_batch_size
        batch_num = int(np.ceil(label_room.size / batch_point))
        end = time.time()
        output_room = np.array([])
        for i in range(batch_num):
            s_i, e_i = i * batch_point, min((i + 1) * batch_point,
                                            label_room.size)
            input, target, index = data_room[
                s_i:e_i, :], label_room[s_i:e_i], index_room[s_i:e_i]
            input = torch.from_numpy(input).float().view(
                -1, args.num_point, input.shape[1])
            target = torch.from_numpy(target).long().view(-1, args.num_point)
            with torch.no_grad():
                output = model(input.cuda())
            loss = criterion(output, target.cuda())  # for reference
            output = output.transpose(1, 2).contiguous().view(
                -1, args.classes).data.cpu().numpy()
            pred = np.argmax(output, axis=1)
            intersection, union, target = intersectionAndUnion(
                pred,
                target.view(-1).data.cpu().numpy(), args.classes,
                args.ignore_label)
            accuracy = sum(intersection) / (sum(target) + 1e-10)
            output_room = np.vstack([output_room, output
                                     ]) if output_room.size else output
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % args.print_freq == 0) or (i + 1 == batch_num):
                logger.info(
                    'Test: [{}/{}]-[{}/{}] '
                    'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss {loss:.4f} '
                    'Accuracy {accuracy:.4f} '
                    'Points {gt.size}.'.format(idx + 1,
                                               len(xyz_all),
                                               i + 1,
                                               batch_num,
                                               batch_time=batch_time,
                                               loss=loss,
                                               accuracy=accuracy,
                                               gt=gt))

        pred = np.zeros((gt.size, args.classes))
        for j in range(len(index_room)):
            pred[index_room[j]] += output_room[j]
        pred = np.argmax(pred, axis=1)

        # calculation 1: add per room predictions
        intersection, union, target = intersectionAndUnion(
            pred, gt, args.classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        # calculation 2
        pred_all = np.hstack([pred_all, pred]) if pred_all.size else pred
        gt_all = np.hstack([gt_all, gt]) if gt_all.size else gt
        pred_save.append(pred), gt_save.append(gt)

        # compute voxel accuracy (follow scannet, pointnet++ and pointcnn)
        res = 0.0484
        coord_min, coord_max = np.min(points, axis=0), np.max(points, axis=0)
        nvox = np.ceil((coord_max - coord_min) / res)
        vidx = np.ceil((points - coord_min) / res)
        vidx = vidx[:,
                    0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
        uvidx, vpidx = np.unique(vidx, return_index=True)
        # compute voxel label
        uvlabel = np.array(gt)[vpidx]
        uvpred = np.array(pred)[vpidx]
        # compute voxel accuracy, ignoring 255 (remapped from ScanNet's unannotated label 0)
        c_accvox = np.sum(np.equal(uvpred, uvlabel))
        c_ignore = np.sum(np.equal(uvlabel, 255))
        vox_acc.append([c_accvox, len(uvlabel) - c_ignore])

    with open(
            os.path.join(args.save_folder,
                         "pred_{}.pickle".format(args.split)), 'wb') as handle:
        pickle.dump({'pred': pred_save},
                    handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            os.path.join(args.save_folder, "gt_{}.pickle".format(args.split)),
            'wb') as handle:
        pickle.dump({'gt': gt_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # calculation 1
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU1 = np.mean(iou_class)
    mAcc1 = np.mean(accuracy_class)
    allAcc1 = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    # calculation 2
    intersection, union, target = intersectionAndUnion(pred_all, gt_all,
                                                       args.classes,
                                                       args.ignore_label)
    iou_class = intersection / (union + 1e-10)
    accuracy_class = intersection / (target + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection) / (sum(target) + 1e-10)
    # compute avg voxel acc
    vox_acc = np.sum(vox_acc, 0)
    voxAcc = vox_acc[0] * 1.0 / vox_acc[1]
    logger.info(
        'Val result: mIoU/mAcc/allAcc/voxAcc {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.
        format(mIoU, mAcc, allAcc, voxAcc))
    logger.info(
        'Val1 result: mIoU/mAcc/allAcc/voxAcc {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.
        format(mIoU1, mAcc1, allAcc1, voxAcc))

    for i in range(args.classes):
        logger.info(
            'Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return mIoU, mAcc, allAcc, pred_all
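
Note: the voxel-accuracy block inside the loop above follows the ScanNet / PointNet++ protocol: points are binned into voxels of roughly 4.84 cm, one representative point is kept per voxel, and accuracy is measured on those representatives while skipping the unannotated label 255. A standalone sketch of the same idea (function name and signature are illustrative, not from the original):

import numpy as np


def voxel_accuracy(points, gt, pred, res=0.0484, ignore_label=255):
    """Accuracy over one representative point per (res x res x res) voxel."""
    coord_min = points.min(axis=0)
    nvox = np.ceil((points.max(axis=0) - coord_min) / res)
    vidx = np.ceil((points - coord_min) / res)
    vidx = vidx[:, 0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
    _, vpidx = np.unique(vidx, return_index=True)  # one representative per voxel
    uvlabel, uvpred = gt[vpidx], pred[vpidx]
    valid = uvlabel != ignore_label  # drop unannotated voxels
    return np.sum(uvpred[valid] == uvlabel[valid]) / max(valid.sum(), 1)
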