Example #1
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        prediction = predict_whole_img(model, input)
        # prediction=prediction[0].numpy()
        prediction = np.argmax(prediction, axis=3)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))

        for g in range(0, prediction.shape[0]):
            check_makedirs(gray_folder)
            gray = np.uint8(prediction[g])
            image_path, _ = data_list[i * args.batch_size_gen + g]
            image_name = image_path.split('/')[-1].split('.')[0]
            gray_path = os.path.join(gray_folder, image_name + '.png')
            cv2.imwrite(gray_path, gray)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
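All of these examples rely on small utilities such as AverageMeter and check_makedirs that are imported elsewhere in the project. A minimal sketch of what they typically look like in PSPNet-style code bases (an assumption, not the project's exact code):

import os

class AverageMeter(object):
    """Tracks the most recent value and the running average."""
    def __init__(self):
        self.val = 0.0
        self.avg = 0.0
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

def check_makedirs(dir_name):
    # create the directory (and any missing parents) if it does not exist yet
    if not os.path.exists(dir_name):
        os.makedirs(dir_name, exist_ok=True)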
Example #2
File: test.py Project: lixin666/MGL
def test(test_loader, data_list, model, cod_folder, coee_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    check_makedirs(cod_folder)
    check_makedirs(coee_folder)
    for i, (input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        with torch.no_grad():
            cod_pred, coee_pred = model(input)
        cod_pred, coee_pred = torch.sigmoid(cod_pred), torch.sigmoid(coee_pred)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        cod = np.uint8(cod_pred.squeeze().detach().cpu().numpy() * 255)
        coee = np.uint8(coee_pred.squeeze().detach().cpu().numpy() * 255)

        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        cod_path = os.path.join(cod_folder, image_name + '.png')
        coee_path = os.path.join(coee_folder, image_name + '.png')
        cv2.imwrite(cod_path, cod)
        cv2.imwrite(coee_path, coee)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
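The saved maps are plain 8-bit PNGs, so they can be reloaded and binarised afterwards; a short illustrative snippet (the file name and the 0.5 threshold are placeholders, not from the project):

import cv2
import numpy as np

# reload one saved camouflaged-object map and threshold it at 0.5
cod = cv2.imread('cod/some_image.png', cv2.IMREAD_GRAYSCALE)   # uint8, 0..255
cod_mask = (cod.astype(np.float32) / 255.0 > 0.5).astype(np.uint8) * 255
cv2.imwrite('cod_binary.png', cod_mask)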
Example #3
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _, image_paths) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        # print(np.amax(input), np.amin(input), np.median(input))
        # print(np.amax(image), np.amin(image), np.median(image))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
        cv2.imwrite(color_path.replace('.png', '_RGB_scale.png'), image_scale)
        # os.system('cp -r %s %s'%(image_path, color_path.replace('.png', '_RGB.png')))
        image_RGB = args.read_image(image_path)
        cv2.imwrite(color_path.replace('.png', '_RGB.png'), image_RGB)
        print('Result saved to %s; originally from %s' %
              (color_path, image_path))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
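colorize converts a gray label map into a paletted PIL image. A common implementation, assuming colors is a flat per-class RGB palette (e.g. loaded from the dataset's colors file):

import numpy as np
from PIL import Image

def colorize(gray, palette):
    # gray: 2-D uint8 label map; palette: flat [R0, G0, B0, R1, G1, B1, ...]
    color = Image.fromarray(gray.astype(np.uint8)).convert('P')
    color.putpalette(palette)
    return color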
Example #4
def test(test_loader, data_list, model, classes, base_size, crop_h, crop_w,
         scales, binary_folder):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w)

        prediction /= len(scales)

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(binary_folder)
        image_path, _ = data_list[i]
        image_name = os.path.split(image_path)[-1]
        image_name = image_name.replace('png', 'npy')
        save_path = os.path.join(binary_folder, image_name)
        np.save(save_path, prediction)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
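Every example recomputes the aspect-preserving scaled size by hand inside the scale loop; a small hypothetical helper (not part of the original code) makes the intent explicit:

def scaled_size(h, w, long_size):
    # keep the aspect ratio while forcing the longer side to long_size
    if h > w:
        return long_size, round(long_size / float(h) * w)   # (new_h, new_w)
    return round(long_size / float(w) * h), long_size

# e.g. scaled_size(1024, 2048, 2048) -> (1024, 2048)
#      scaled_size(480, 640, 512)    -> (384, 512)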
Example #5
def ensemble(data_list,
             h,
             w,
             ensemble_way='mean_of_mean_and_max',
             threshold=0.85):
    for i, (image_path, target_path) in enumerate(data_list):
        image_name = os.path.split(image_path)[-1]
        pred_npy_name = image_name.replace('png', 'npy')
        prediction = np.zeros((h, w, args.folds * len(args.model_path)),
                              dtype=float)
        for fold_i in range(args.folds):
            for idx, model_i in enumerate(args.model_path):
                single_npy_path = args.result_save_dir + 'Fold{}/epoch_{}/'.format(
                    fold_i, model_i) + pred_npy_name
                pred_npy = np.load(single_npy_path)
                prediction[:, :,
                           fold_i * len(args.model_path) + idx] = pred_npy

        if ensemble_way == 'mean':
            prediction = np.mean(prediction, axis=-1)
        elif ensemble_way == 'max':
            prediction = np.max(prediction, axis=-1)
        elif ensemble_way == 'mean_of_mean_and_max':
            mean_prediction = np.mean(prediction, axis=-1)
            max_prediction = np.max(prediction, axis=-1)
            final_prediction = np.zeros((h, w, 2), dtype=float)
            final_prediction[:, :, 0] = mean_prediction
            final_prediction[:, :, 1] = max_prediction
            prediction = np.mean(final_prediction, axis=-1)

        prediction_cp = prediction.copy()
        # binarise: values at or above the threshold -> 0, below -> 255
        prediction[prediction_cp >= threshold] = 0
        prediction[prediction_cp < threshold] = 255

        ensemble_folder = args.result_save_dir + 'ensemble/'
        check_makedirs(ensemble_folder)
        binary = np.uint8(prediction)
        binary_path = os.path.join(ensemble_folder, image_name)
        cv2.imwrite(binary_path, binary)

    logger.info('save ensemble file finish!')
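To make the 'mean_of_mean_and_max' branch and the thresholding concrete, here is a toy run on a 2x2 probability map from three hypothetical folds (all values made up):

import numpy as np

stack = np.stack([
    np.array([[0.2, 0.9], [0.8, 0.1]]),
    np.array([[0.4, 0.7], [0.9, 0.2]]),
    np.array([[0.3, 0.8], [0.7, 0.3]]),
], axis=-1)                                   # shape (2, 2, 3): one slice per fold

mean_pred = stack.mean(axis=-1)               # [[0.30, 0.80], [0.80, 0.20]]
max_pred = stack.max(axis=-1)                 # [[0.40, 0.90], [0.90, 0.30]]
combined = (mean_pred + max_pred) / 2         # [[0.35, 0.85], [0.85, 0.25]]

threshold = 0.85
binary = np.where(combined >= threshold, 0, 255).astype(np.uint8)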
Example #6
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model, image_scale, classes, crop_h,
                                        crop_w, h, w, mean, std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()

        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        color.save(color_path)
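scale_process is the helper shared by most of these examples: it pads the scaled image to at least one crop, slides crop_h x crop_w windows over it with overlapping strides, averages the per-window soft-max maps, and resizes the result back to the original h x w. The condensed sketch below follows the usual PSPNet-style reference code; the flip option, padding values and exact signature of the project's own helper may differ, and net_process here is an assumed companion function.

import cv2
import numpy as np
import torch
import torch.nn.functional as F

def net_process(model, image, mean, std=None):
    # normalise one HWC crop, run the model, return an HWC soft-max map
    inp = torch.from_numpy(image.transpose((2, 0, 1))).float()
    if std is None:
        std = [1.0] * len(mean)
    for t, m, s in zip(inp, mean, std):
        t.sub_(m).div_(s)
    inp = inp.unsqueeze(0).cuda()
    with torch.no_grad():
        out = model(inp)
    out = F.interpolate(out, size=image.shape[:2], mode='bilinear',
                        align_corners=True)
    return F.softmax(out, dim=1)[0].cpu().numpy().transpose(1, 2, 0)

def scale_process(model, image, classes, crop_h, crop_w, h, w, mean, std=None,
                  stride_rate=2 / 3):
    ori_h, ori_w, _ = image.shape
    # pad so the scaled image is at least one crop in each dimension
    pad_h, pad_w = max(crop_h - ori_h, 0), max(crop_w - ori_w, 0)
    pad_h_half, pad_w_half = pad_h // 2, pad_w // 2
    if pad_h > 0 or pad_w > 0:
        image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half,
                                   pad_w_half, pad_w - pad_w_half,
                                   cv2.BORDER_CONSTANT, value=mean)
    new_h, new_w, _ = image.shape
    stride_h = int(np.ceil(crop_h * stride_rate))
    stride_w = int(np.ceil(crop_w * stride_rate))
    grid_h = int(np.ceil(float(new_h - crop_h) / stride_h)) + 1
    grid_w = int(np.ceil(float(new_w - crop_w) / stride_w)) + 1
    prediction = np.zeros((new_h, new_w, classes), dtype=float)
    count = np.zeros((new_h, new_w, 1), dtype=float)
    for ih in range(grid_h):
        for iw in range(grid_w):
            e_h = min(ih * stride_h + crop_h, new_h)
            s_h = e_h - crop_h
            e_w = min(iw * stride_w + crop_w, new_w)
            s_w = e_w - crop_w
            crop = image[s_h:e_h, s_w:e_w].copy()
            prediction[s_h:e_h, s_w:e_w] += net_process(model, crop, mean, std)
            count[s_h:e_h, s_w:e_w] += 1
    prediction /= count                      # average overlapping windows
    prediction = prediction[pad_h_half:pad_h_half + ori_h,
                            pad_w_half:pad_w_half + ori_w]
    return cv2.resize(prediction, (w, h), interpolation=cv2.INTER_LINEAR)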
Example #7
def test(test_loader, data_list, model, classes, mean, std, gray_folder,
         color_folder, derain_folder, edge_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (_, input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)

        with torch.no_grad():
            derain_outs, seg_outs, edge_outs = model(input)

        derain_outs = derain_outs.cpu().numpy()
        seg_outs = seg_outs.cpu().numpy()
        edge_outs = edge_outs.cpu().numpy()

        # process derain img
        derain_outs = np.transpose(derain_outs, (0, 2, 3, 1)).squeeze(axis=0)
        derain_outs *= std
        derain_outs += mean
        derain_outs = np.clip(derain_outs, a_max=255, a_min=0)
        derain_outs = derain_outs.astype('uint8')
        derain_outs = cv2.cvtColor(derain_outs.astype('uint8'),
                                   cv2.COLOR_RGB2BGR)

        # process seg pred
        seg_outs = np.transpose(seg_outs, (0, 2, 3, 1))
        seg_outs = np.argmax(seg_outs, axis=3).squeeze(axis=0)

        # process edge pred
        edge_outs = np.transpose(edge_outs,
                                 (0, 2, 3, 1)).squeeze(axis=3).squeeze(axis=0)
        edge_outs = np.clip(edge_outs, a_max=1, a_min=0)
        edge_outs = (edge_outs * 255).astype('uint8')

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        check_makedirs(derain_folder)
        check_makedirs(edge_folder)

        gray = np.uint8(seg_outs)
        color = colorize(gray, colors)
        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        derain_path = os.path.join(derain_folder, image_name + '.png')
        edge_path = os.path.join(edge_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        cv2.imwrite(derain_path, derain_outs)
        cv2.imwrite(edge_path, edge_outs)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Example #8
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, derain_folder, edge_folder,
         colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (_, input, _, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        derain_prediction = np.zeros((h, w, 3), dtype=float)
        seg_prediction = np.zeros((h, w, classes), dtype=float)
        edge_prediction = np.zeros((h, w), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            temp_derain_prediction, temp_seg_prediction, temp_edge_prediction = scale_process(
                model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
            derain_prediction += temp_derain_prediction
            seg_prediction += temp_seg_prediction
            edge_prediction += temp_edge_prediction
        derain_prediction /= len(scales)
        seg_prediction /= len(scales)
        edge_prediction /= len(scales)
        # seg_prediction = np.argmax(seg_prediction, axis=2)

        # process derain img
        derain_outs = derain_prediction
        derain_outs *= std
        derain_outs += mean
        derain_outs = np.clip(derain_outs, a_max=255, a_min=0)
        derain_outs = derain_outs.astype('uint8')
        derain_outs = cv2.cvtColor(derain_outs.astype('uint8'),
                                   cv2.COLOR_RGB2BGR)

        # process seg pred
        seg_outs = seg_prediction
        seg_outs = np.argmax(seg_outs, axis=2).squeeze()

        # process edge pred
        edge_outs = edge_prediction
        edge_outs = np.clip(edge_outs, a_max=1, a_min=0)
        edge_outs = (edge_outs * 255).astype('uint8')

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        check_makedirs(derain_folder)
        check_makedirs(edge_folder)
        gray = np.uint8(seg_outs)
        color = colorize(gray, colors)
        image_path, _, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')
        derain_path = os.path.join(derain_folder, image_name + '.png')
        edge_path = os.path.join(edge_folder, image_name + '.png')
        cv2.imwrite(gray_path, gray)
        cv2.imwrite(derain_path, derain_outs)
        cv2.imwrite(edge_path, edge_outs)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Example #9
File: test.py Project: lixin666/MGL
def main():
    global args, logger
    args = get_parser('config/cod_mgl50.yaml')
    check(args)
    logger = get_logger()
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(
        str(x) for x in args.test_gpu)
    logger.info(args)
    logger.info("=> creating model ...")
    logger.info("Classes: {}".format(args.classes))

    value_scale = 255
    mean = [0.485, 0.456, 0.406]
    mean = [item * value_scale for item in mean]
    std = [0.229, 0.224, 0.225]
    std = [item * value_scale for item in std]

    date_str = str(datetime.datetime.now().date())
    save_folder = args.save_folder + '/' + date_str
    check_makedirs(save_folder)
    cod_folder = os.path.join(save_folder, 'cod')
    coee_folder = os.path.join(save_folder, 'coee')

    test_transform = transform.Compose([
        transform.Resize((args.test_h, args.test_w)),
        transform.ToTensor(),
        transform.Normalize(mean=mean, std=std)
    ])

    test_data = dataset.SemData(split=args.split,
                                data_root=args.data_root,
                                data_list=args.test_list,
                                transform=test_transform)
    index_start = args.index_start
    if args.index_step == 0:
        index_end = len(test_data.data_list)
    else:
        index_end = min(index_start + args.index_step,
                        len(test_data.data_list))
    test_data.data_list = test_data.data_list[index_start:index_end]
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              num_workers=args.workers,
                                              pin_memory=True)

    if not args.has_prediction:
        if args.arch == 'mgl':
            from model.mglnet import MGLNet
            model = MGLNet(layers=args.layers,
                           classes=args.classes,
                           zoom_factor=args.zoom_factor,
                           pretrained=False,
                           args=args)
        #logger.info(model)
        model = torch.nn.DataParallel(model).cuda()
        cudnn.benchmark = True
        if os.path.isfile(args.model_path):
            logger.info("=> loading checkpoint '{}'".format(args.model_path))
            checkpoint = torch.load(args.model_path, map_location='cuda:0')
            model.load_state_dict(checkpoint['state_dict'], strict=False)
            logger.info("=> loaded checkpoint '{}', epoch {}".format(
                args.model_path, checkpoint['epoch']))

        else:
            raise RuntimeError("=> no checkpoint found at '{}'".format(
                args.model_path))
        test(test_loader, test_data.data_list, model, cod_folder, coee_folder)
    if args.split != 'test':
        calc_acc(test_data.data_list, cod_folder, coee_folder)
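get_parser loads a YAML config and exposes it with attribute access (args.classes, args.test_gpu, and so on). A minimal stand-in is sketched below; it assumes a sectioned PyYAML file of flat key/value groups and skips the command-line overriding the real project may do:

import argparse
import yaml

def get_parser(default_config):
    parser = argparse.ArgumentParser(description='Model evaluation')
    parser.add_argument('--config', type=str, default=default_config)
    cli_args = parser.parse_args()
    with open(cli_args.config, 'r') as f:
        cfg = yaml.safe_load(f)
    # flatten {'DATA': {...}, 'TEST': {...}} style sections into one namespace
    flat = {}
    for section in cfg.values():
        flat.update(section)
    return argparse.Namespace(**flat)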
Example #10
def run_test(test_loader, data_list, model, classes, mean, std, base_size,
             crop_h, crop_w, scales, gray_folder, color_folder, colors,
             is_flip):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    is_test = 'test' in data_list[0][0]
    for i, (input, _) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            prediction += scale_process(model,
                                        image_scale,
                                        classes,
                                        crop_h,
                                        crop_w,
                                        h,
                                        w,
                                        mean,
                                        std,
                                        FLIP=is_flip)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))
        check_makedirs(gray_folder)
        check_makedirs(color_folder)
        gray = np.uint8(prediction)
        color = colorize(gray, colors)
        image_path, _ = data_list[i]
        image_name = image_path.split('/')[-1].split('.')[0]
        gray_path = os.path.join(gray_folder, image_name + '.png')
        color_path = os.path.join(color_folder, image_name + '.png')

        if is_test:
            gray_labelid = trainID2labelID(gray)
            cv2.imwrite(gray_path, gray_labelid)
        else:
            cv2.imwrite(gray_path, gray)
        color.save(color_path)
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
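trainID2labelID converts the 19 Cityscapes train IDs back to the official label IDs expected by the evaluation server. A straightforward lookup, assuming the standard Cityscapes mapping:

import numpy as np

# official Cityscapes label IDs, indexed by train ID 0..18
_TRAINID_TO_LABELID = np.array(
    [7, 8, 11, 12, 13, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 31, 32, 33],
    dtype=np.uint8)

def trainID2labelID(gray):
    # gray: 2-D array of train IDs in [0, 18]
    return _TRAINID_TO_LABELID[gray]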
Example #11
def test(test_loader,
         data_list,
         model,
         classes,
         mean,
         std,
         base_size,
         crop_h,
         crop_w,
         scales,
         gray_folder,
         color_folder,
         colors,
         is_multi_patch=True,
         is_med=False):
    """
    crop_h, crop_w = 713, 713
    
    """
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    for i, (input, gt) in enumerate(test_loader):
        data_time.update(time.time() - end)
        input = np.squeeze(input.numpy(), axis=0)
        image = np.transpose(input, (1, 2, 0))
        h, w, _ = image.shape  # original image size
        prediction = np.zeros((h, w, classes), dtype=float)
        for scale in scales:
            long_size = round(scale * base_size)  # during inference the image canvas is set to 2048
            new_h = long_size
            new_w = long_size
            if h > w:
                new_w = round(long_size / float(h) * w)
            else:
                new_h = round(long_size / float(w) * h)
            image_scale = cv2.resize(image, (new_w, new_h),
                                     interpolation=cv2.INTER_LINEAR)
            if is_multi_patch:
                print('inplace multi inference with patches')
                prediction += scale_process(model, image_scale, classes,
                                            crop_h, crop_w, h, w, mean, std)
            else:
                print('inplane one inference')
                #prediction += scale_process_direct(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
                prediction += scale_process(model, image_scale, classes,
                                            new_h + 1, new_w + 1, h, w, mean,
                                            std)
        prediction /= len(scales)
        prediction = np.argmax(prediction, axis=2)
        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info(
                'Test: [{}/{}] '
                'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(
                    i + 1,
                    len(test_loader),
                    data_time=data_time,
                    batch_time=batch_time))

        _, target_path = data_list[i]
        image_name = get_image_name(target_path, is_med)
        # print(image_name)

        check_makedirs(gray_folder)
        gray = np.uint8(prediction)
        gray_path = os.path.join(gray_folder, image_name)
        check_makedirs(Path(gray_path).parent)
        cv2.imwrite(gray_path, gray)

        check_makedirs(color_folder)
        color = colorize(gray, colors)
        color_path = os.path.join(color_folder, image_name)
        color.save(color_path)

    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
Example #12
def test(model, criterion, names):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    rooms = sorted(os.listdir(args.train_full_folder))
    rooms_split = [
        room for room in rooms if 'Area_{}'.format(args.test_area) in room
    ]
    gt_all, pred_all = np.array([]), np.array([])
    check_makedirs(args.save_folder)
    pred_save, gt_save = [], []
    for idx, room_name in enumerate(rooms_split):
        data_room, label_room, index_room, gt = data_prepare(
            os.path.join(args.train_full_folder, room_name))
        batch_point = args.num_point * args.test_batch_size
        batch_num = int(np.ceil(label_room.size / batch_point))
        end = time.time()
        output_room = np.array([])
        for i in range(batch_num):
            s_i, e_i = i * batch_point, min((i + 1) * batch_point,
                                            label_room.size)
            input, target, index = data_room[
                s_i:e_i, :], label_room[s_i:e_i], index_room[s_i:e_i]
            input = torch.from_numpy(input).float().view(
                -1, args.num_point, input.shape[1])
            target = torch.from_numpy(target).long().view(-1, args.num_point)
            with torch.no_grad():
                output = model(input.cuda())
            loss = criterion(output, target.cuda())  # for reference
            output = output.transpose(1, 2).contiguous().view(
                -1, args.classes).data.cpu().numpy()
            pred = np.argmax(output, axis=1)
            intersection, union, target = intersectionAndUnion(
                pred,
                target.view(-1).data.cpu().numpy(), args.classes,
                args.ignore_label)
            accuracy = sum(intersection) / (sum(target) + 1e-10)
            output_room = np.vstack([output_room, output
                                     ]) if output_room.size else output
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % args.print_freq == 0) or (i + 1 == batch_num):
                logger.info(
                    'Test: [{}/{}]-[{}/{}] '
                    'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss {loss:.4f} '
                    'Accuracy {accuracy:.4f} '
                    'Points {gt.size}.'.format(idx + 1,
                                               len(rooms_split),
                                               i + 1,
                                               batch_num,
                                               batch_time=batch_time,
                                               loss=loss,
                                               accuracy=accuracy,
                                               gt=gt))
        '''
        unq, unq_inv, unq_cnt = np.unique(index_room, return_inverse=True, return_counts=True)
        index_array = np.split(np.argsort(unq_inv), np.cumsum(unq_cnt[:-1]))
        output_room = np.vstack([output_room, np.zeros((1, args.classes))])
        index_array_fill = np.array(list(itertools.zip_longest(*index_array, fillvalue=output_room.shape[0] - 1))).T
        pred = output_room[index_array_fill].sum(1)
        pred = np.argmax(pred, axis=1)
        '''
        pred = np.zeros((gt.size, args.classes))
        for j in range(len(index_room)):
            pred[index_room[j]] += output_room[j]
        pred = np.argmax(pred, axis=1)

        # calculation 1: add per room predictions
        intersection, union, target = intersectionAndUnion(
            pred, gt, args.classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        # calculation 2
        pred_all = np.hstack([pred_all, pred]) if pred_all.size else pred
        gt_all = np.hstack([gt_all, gt]) if gt_all.size else gt
        pred_save.append(pred), gt_save.append(gt)

    with open(
            os.path.join(args.save_folder,
                         "pred_{}.pickle".format(args.test_area)),
            'wb') as handle:
        pickle.dump({'pred': pred_save},
                    handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            os.path.join(args.save_folder,
                         "gt_{}.pickle".format(args.test_area)),
            'wb') as handle:
        pickle.dump({'gt': gt_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # calculation 1
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU1 = np.mean(iou_class)
    mAcc1 = np.mean(accuracy_class)
    allAcc1 = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    # calculation 2
    intersection, union, target = intersectionAndUnion(pred_all, gt_all,
                                                       args.classes,
                                                       args.ignore_label)
    iou_class = intersection / (union + 1e-10)
    accuracy_class = intersection / (target + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection) / (sum(target) + 1e-10)
    logger.info('Val result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU, mAcc, allAcc))
    logger.info('Val1 result: mIoU/mAcc/allAcc {:.4f}/{:.4f}/{:.4f}.'.format(
        mIoU1, mAcc1, allAcc1))

    for i in range(args.classes):
        logger.info(
            'Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return mIoU, mAcc, allAcc, pred_all
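intersectionAndUnion returns per-class intersection, union and target histograms for the IoU/accuracy bookkeeping above. A common numpy implementation (the project's own utility may differ in details):

import numpy as np

def intersectionAndUnion(output, target, K, ignore_index=255):
    # output/target: integer label arrays of the same shape, K classes
    output = output.reshape(-1).copy()
    target = target.reshape(-1)
    output[target == ignore_index] = ignore_index   # drop ignored pixels/points
    intersection = output[output == target]
    area_intersection, _ = np.histogram(intersection, bins=np.arange(K + 1))
    area_output, _ = np.histogram(output, bins=np.arange(K + 1))
    area_target, _ = np.histogram(target, bins=np.arange(K + 1))
    area_union = area_output + area_target - area_intersection
    return area_intersection, area_union, area_target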
Example #13
def test(test_loader, data_list, model, classes, mean, std, base_size, crop_h,
         crop_w, scales, gray_folder, color_folder, colors):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    data_time = AverageMeter()
    batch_time = AverageMeter()
    model.eval()
    end = time.time()
    pred_path_list = []
    target_path_list = []
    check_makedirs(gray_folder)
    check_makedirs(color_folder)

    for i, (input, target, image_paths) in tqdm(enumerate(test_loader)):
        data_time.update(time.time() - end)
        # input = np.squeeze(input.numpy(), axis=0)
        # image = np.transpose(input, (1, 2, 0))
        # # print(np.amax(input), np.amin(input), np.median(input))
        # # print(np.amax(image), np.amin(image), np.median(image))
        # h, w, _ = image.shape
        # prediction = np.zeros((h, w, classes), dtype=float)
        # for scale in scales:
        #     long_size = round(scale * base_size)
        #     new_h = long_size
        #     new_w = long_size
        #     if h > w:
        #         new_w = round(long_size/float(h)*w)
        #     else:
        #         new_h = round(long_size/float(w)*h)
        #     image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
        #     prediction += scale_process(model, image_scale, classes, crop_h, crop_w, h, w, mean, std)
        # prediction /= len(scales)
        # prediction = np.argmax(prediction, axis=2)

        input = input.cuda(non_blocking=True)
        output = model(input)
        if args.zoom_factor != 8:
            output = F.interpolate(output, size=target.size()[1:], mode='bilinear', align_corners=True)
        prediction = torch.argmax(output, 1).cpu().numpy().squeeze()

        batch_time.update(time.time() - end)
        end = time.time()
        if ((i + 1) % 10 == 0) or (i + 1 == len(test_loader)):
            logger.info('Test: [{}/{}] '
                        'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
                        'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
                                                                                    data_time=data_time,
                                                                                    batch_time=batch_time))
        gray = np.uint8(prediction)
        if args.test_in_nyu_label_space:
            gray = map_to_nyu(gray, args.dataset_name_pred)

        gray_path = os.path.join(gray_folder, '%02d.png'%i)
        pred_path_list.append(gray_path)
        cv2.imwrite(gray_path, gray)

        if args.test_has_gt:
            target = np.uint8(target.squeeze().cpu().numpy())
            if args.test_in_nyu_label_space:
                target = map_to_nyu(target, args.dataset_name)
            gray_target_path = os.path.join(gray_folder, '%02d_target.png'%i)
            cv2.imwrite(gray_target_path, target)
            target_path_list.append(gray_target_path)

        if i <= 100:
            color = colorize(gray, colors)
            image_path, _ = data_list[i]
            image_name = image_path.split('/')[-1].split('.')[0]
            color_path = os.path.join(color_folder, '%02d.png'%i)
            color.save(color_path)
            image_RGB = args.read_image(image_path, resize_to_target_size=True)
            cv2.imwrite(color_path.replace('.png', '_RGB.png'), image_RGB)
            if args.test_has_gt:
                target_color = colorize(target, colors)
                color_path = os.path.join(color_folder, '%02d_GT.png' % i)
                target_color.save(color_path)
            print('Result saved to %s; originally from %s'%(color_path, image_path))
        # if i > 100:
        #     break
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    
    assert len(pred_path_list) == len(target_path_list)
    return pred_path_list, target_path_list
Example #14
def test(model, criterion, names):
    logger.info('>>>>>>>>>>>>>>>> Start Evaluation >>>>>>>>>>>>>>>>')
    batch_time = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    target_meter = AverageMeter()

    model.eval()
    data_file = os.path.join(args.data_root,
                             'scannet_{}.pickle'.format(args.split))
    file_pickle = open(data_file, 'rb')
    xyz_all = pickle.load(file_pickle, encoding='latin1')
    label_all = pickle.load(file_pickle, encoding='latin1')
    file_pickle.close()
    gt_all, pred_all = np.array([]), np.array([])
    vox_acc = []
    check_makedirs(args.save_folder)
    pred_save, gt_save = [], []
    for idx in range(len(xyz_all)):
        points, labels = xyz_all[idx], label_all[idx].astype(np.int32)
        gt = labels - 1
        gt[labels == 0] = 255
        data_room, label_room, index_room = data_prepare(points, gt)
        batch_point = args.num_point * args.test_batch_size
        batch_num = int(np.ceil(label_room.size / batch_point))
        end = time.time()
        output_room = np.array([])
        for i in range(batch_num):
            s_i, e_i = i * batch_point, min((i + 1) * batch_point,
                                            label_room.size)
            input, target, index = data_room[
                s_i:e_i, :], label_room[s_i:e_i], index_room[s_i:e_i]
            input = torch.from_numpy(input).float().view(
                -1, args.num_point, input.shape[1])
            target = torch.from_numpy(target).long().view(-1, args.num_point)
            with torch.no_grad():
                output = model(input.cuda())
            loss = criterion(output, target.cuda())  # for reference
            output = output.transpose(1, 2).contiguous().view(
                -1, args.classes).data.cpu().numpy()
            pred = np.argmax(output, axis=1)
            intersection, union, target = intersectionAndUnion(
                pred,
                target.view(-1).data.cpu().numpy(), args.classes,
                args.ignore_label)
            accuracy = sum(intersection) / (sum(target) + 1e-10)
            output_room = np.vstack([output_room, output
                                     ]) if output_room.size else output
            batch_time.update(time.time() - end)
            end = time.time()
            if ((i + 1) % args.print_freq == 0) or (i + 1 == batch_num):
                logger.info(
                    'Test: [{}/{}]-[{}/{}] '
                    'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                    'Loss {loss:.4f} '
                    'Accuracy {accuracy:.4f} '
                    'Points {gt.size}.'.format(idx + 1,
                                               len(xyz_all),
                                               i + 1,
                                               batch_num,
                                               batch_time=batch_time,
                                               loss=loss,
                                               accuracy=accuracy,
                                               gt=gt))

        pred = np.zeros((gt.size, args.classes))
        for j in range(len(index_room)):
            pred[index_room[j]] += output_room[j]
        pred = np.argmax(pred, axis=1)

        # calculation 1: add per room predictions
        intersection, union, target = intersectionAndUnion(
            pred, gt, args.classes, args.ignore_label)
        intersection_meter.update(intersection)
        union_meter.update(union)
        target_meter.update(target)
        # calculation 2
        pred_all = np.hstack([pred_all, pred]) if pred_all.size else pred
        gt_all = np.hstack([gt_all, gt]) if gt_all.size else gt
        pred_save.append(pred), gt_save.append(gt)

        # compute voxel accuracy (follow scannet, pointnet++ and pointcnn)
        res = 0.0484
        coord_min, coord_max = np.min(points, axis=0), np.max(points, axis=0)
        nvox = np.ceil((coord_max - coord_min) / res)
        vidx = np.ceil((points - coord_min) / res)
        vidx = vidx[:,
                    0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
        uvidx, vpidx = np.unique(vidx, return_index=True)
        # compute voxel label
        uvlabel = np.array(gt)[vpidx]
        uvpred = np.array(pred)[vpidx]
        # compute voxel accuracy (ignore label 0 which is scannet unannotated)
        c_accvox = np.sum(np.equal(uvpred, uvlabel))
        c_ignore = np.sum(np.equal(uvlabel, 255))
        vox_acc.append([c_accvox, len(uvlabel) - c_ignore])

    with open(
            os.path.join(args.save_folder,
                         "pred_{}.pickle".format(args.split)), 'wb') as handle:
        pickle.dump({'pred': pred_save},
                    handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    with open(
            os.path.join(args.save_folder, "gt_{}.pickle".format(args.split)),
            'wb') as handle:
        pickle.dump({'gt': gt_save}, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # calculation 1
    iou_class = intersection_meter.sum / (union_meter.sum + 1e-10)
    accuracy_class = intersection_meter.sum / (target_meter.sum + 1e-10)
    mIoU1 = np.mean(iou_class)
    mAcc1 = np.mean(accuracy_class)
    allAcc1 = sum(intersection_meter.sum) / (sum(target_meter.sum) + 1e-10)

    # calculation 2
    intersection, union, target = intersectionAndUnion(pred_all, gt_all,
                                                       args.classes,
                                                       args.ignore_label)
    iou_class = intersection / (union + 1e-10)
    accuracy_class = intersection / (target + 1e-10)
    mIoU = np.mean(iou_class)
    mAcc = np.mean(accuracy_class)
    allAcc = sum(intersection) / (sum(target) + 1e-10)
    # compute avg voxel acc
    vox_acc = np.sum(vox_acc, 0)
    voxAcc = vox_acc[0] * 1.0 / vox_acc[1]
    logger.info(
        'Val result: mIoU/mAcc/allAcc/voxAcc {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.
        format(mIoU, mAcc, allAcc, voxAcc))
    logger.info(
        'Val1 result: mIoU/mAcc/allAcc/voxAcc {:.4f}/{:.4f}/{:.4f}/{:.4f}.'.format(
            mIoU1, mAcc1, allAcc1, voxAcc))

    for i in range(args.classes):
        logger.info(
            'Class_{} Result: iou/accuracy {:.4f}/{:.4f}, name: {}.'.format(
                i, iou_class[i], accuracy_class[i], names[i]))
    logger.info('<<<<<<<<<<<<<<<<< End Evaluation <<<<<<<<<<<<<<<<<')
    return mIoU, mAcc, allAcc, pred_all
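The voxel-accuracy block (unique voxel index, one representative point per voxel, ignoring label 255) is easier to follow on a toy example; the indices and labels below are made up:

import numpy as np

# flattened voxel index for each point (illustrative values)
vidx = np.array([0, 0, 3, 3, 7])
gt   = np.array([1, 1, 2, 2, 255])                   # 255 = unannotated
pred = np.array([1, 2, 2, 2, 0])

uvidx, vpidx = np.unique(vidx, return_index=True)    # one representative point per voxel
uvlabel, uvpred = gt[vpidx], pred[vpidx]             # labels of those representatives
correct = np.sum(uvpred == uvlabel)                  # 2 (voxels 0 and 3)
ignored = np.sum(uvlabel == 255)                     # 1 (voxel 7)
vox_acc = correct / float(len(uvlabel) - ignored)    # 2 / 2 = 1.0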