def val(args, val_loader, model):
    """
    args:
      val_loader: loaded for validation dataset
      model: model
    return: mean IoU and IoU class
    """
    # evaluation mode
    model.eval()
    total_batches = len(val_loader)

    data_list = []
    for i, (input, label, size, name) in enumerate(val_loader):
        start_time = time.time()
        with torch.no_grad():
            # input_var = Variable(input).cuda()
            input_var = input.cuda()
            output = model(input_var)
        time_taken = time.time() - start_time
        print("[%d/%d]  time: %.2f" % (i + 1, total_batches, time_taken))
        output = output.cpu().data[0].numpy()
        gt = np.asarray(label[0].numpy(), dtype=np.uint8)
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        data_list.append([gt.flatten(), output.flatten()])

    meanIoU, per_class_iu = get_iou(data_list, args.classes)
    return meanIoU, per_class_iu
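
All of these examples funnel their flattened (ground truth, prediction) pairs into a get_iou helper that is not shown on this page. As a rough guide to what such a helper computes, here is a minimal sketch; the name, signature, and confusion-matrix approach are assumptions, not the projects' actual implementation:

import numpy as np

def get_iou_sketch(data_list, num_classes):
    # Accumulate a confusion matrix over all (gt, prediction) pairs.
    hist = np.zeros((num_classes, num_classes), dtype=np.int64)
    for gt, pred in data_list:
        mask = gt < num_classes  # drop ignore labels such as 255
        hist += np.bincount(
            num_classes * gt[mask].astype(np.int64) + pred[mask],
            minlength=num_classes ** 2).reshape(num_classes, num_classes)
    # IoU per class = TP / (TP + FP + FN); guard against empty classes.
    per_class_iu = np.diag(hist) / np.maximum(
        hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist), 1)
    return per_class_iu.mean(), per_class_iu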
Example #2
File: val.py Project: favoMJ/FEENet
def test(args, test_loader, model):
    """
    args:
      test_loader: loaded for test dataset
      model: model
    return: class IoU and mean IoU
    """
    # evaluation or test mode
    model.eval()
    total_batches = len(test_loader)

    data_list = []
    for i, (input, label, size, name) in enumerate(test_loader):
        with torch.no_grad():
            input_var = input.cuda()
            start_time = time.time()
            output = model(input_var)
            torch.cuda.synchronize()
            time_taken = time.time() - start_time
        print('[%d/%d]  time: %.2f' % (i + 1, total_batches, time_taken))
        output = output[0].cpu().data[0].numpy()  # the model returns multiple outputs; keep the primary one
        gt = np.asarray(label[0].numpy(), dtype=np.uint8)
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        data_list.append([gt.flatten(), output.flatten()])

        # save the predicted image
        if args.save:
            save_predict(output, gt, name[0], args.dataset, args.save_seg_dir,
                         output_grey=False, output_color=True, gt_color=True)

    meanIoU, per_class_iu = get_iou(data_list, args.classes)
    return meanIoU, per_class_iu
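
The save_predict helper used above is likewise external to these snippets. A minimal sketch of what such a function might do is shown below; only the keyword arguments mirror the calls above, while the palette handling and file layout are assumptions:

import os
import numpy as np
from PIL import Image

def save_predict_sketch(output, gt, img_name, dataset, save_path,
                        output_grey=False, output_color=True, gt_color=False,
                        palette=None):
    # output / gt are HxW uint8 label maps; palette maps class id -> RGB.
    os.makedirs(save_path, exist_ok=True)
    if output_grey:
        Image.fromarray(output).save(
            os.path.join(save_path, img_name + '.png'))
    if output_color and palette is not None:
        Image.fromarray(palette[output].astype(np.uint8)).save(
            os.path.join(save_path, img_name + '_color.png'))
    if gt_color and gt is not None and palette is not None:
        Image.fromarray(palette[gt].astype(np.uint8)).save(
            os.path.join(save_path, img_name + '_gt.png'))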
Example #3
def predict(args, test_loader, model):
    """
    args:
      test_loader: loaded for test dataset, for those that do not provide label on the test set
      model: model
    return: class IoU and mean IoU
    """
    # evaluation or test mode
    model.eval()
    total_batches = len(test_loader)
    data_list = []
    pbar = tqdm(iterable=enumerate(test_loader),
                total=total_batches,
                desc='Predicting')
    for i, (input, label, size, name) in pbar:
        with torch.no_grad():
            input_var = input.cuda().float()
            output = model(input_var)
            torch.cuda.synchronize()

        output = output.cpu().data[0].numpy()
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        gt = np.asarray(label[0].numpy(), dtype=np.uint8)

        # Save the colorized prediction; set output_grey=True to produce the
        # greyscale output expected by the Cityscapes official evaluation,
        # and adjust the image name to meet the official requirement
        save_predict(output,
                     None,
                     name[0],
                     args.dataset,
                     args.save_seg_dir,
                     output_grey=False,
                     output_color=True,
                     gt_color=False)
        data_list.append([gt.flatten(), output.flatten()])
    meanIoU, per_class_iu = get_iou(data_list, args.classes)
    print('miou {}\nclass iou {}'.format(meanIoU, per_class_iu))
    result = args.save_seg_dir + '/results.txt'
    with open(result, 'w') as f:
        f.write(str(meanIoU))
        f.write('\n{}'.format(str(per_class_iu)))
    return meanIoU, per_class_iu
Example #4
def val(args, val_loader, criteria, model, epoch):
    """
    args:
      val_loader: loaded for validation dataset
      model: model
    return: mean IoU and IoU class
    """
    # evaluation mode
    model.eval()
    total_batches = len(val_loader)

    data_list = []
    val_loss = []
    pbar = tqdm(iterable=enumerate(val_loader),
                total=total_batches,
                desc='Val')
    for iteration, (input, label, size, name) in pbar:
        with torch.no_grad():
            input_var = input.cuda().float()
            output = model(input_var)
            if type(output) is tuple:
                output = output[0]

        loss = criteria(output, label.long().cuda())
        val_loss.append(loss.item())  # store a Python float rather than a CUDA tensor
        output = output.cpu().data[0].numpy()
        gt = np.asarray(label[0].numpy(), dtype=np.uint8)
        output = output.transpose(1, 2, 0)
        output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        data_list.append([gt.flatten(), output.flatten()])
    val_loss = sum(val_loss) / len(val_loss)
    if epoch % args.val_miou_epochs == 0:
        meanIoU, per_class_iu = get_iou(data_list, args.classes)
        return val_loss, meanIoU, per_class_iu
    else:
        return val_loss
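
Note that this variant returns a bare loss on most epochs and a (loss, mIoU, per-class IoU) triple only every args.val_miou_epochs epochs, so the caller has to branch on the epoch as well. A hedged sketch of how a training loop might consume it (variable names are illustrative, not from the project):

# inside the training loop, after the train step for this epoch
ret = val(args, val_loader, criteria, model, epoch)
if epoch % args.val_miou_epochs == 0:
    val_loss, mean_iou, per_class_iu = ret
    print('epoch %d  val loss %.4f  mIoU %.4f' % (epoch, val_loss, mean_iou))
else:
    val_loss = ret
    print('epoch %d  val loss %.4f' % (epoch, val_loss))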
Example #5
def predict_sliding(args, net, image, tile_size, classes):
    """Sliding-window inference: run the network on overlapping tiles and average the probabilities in the overlaps."""
    total_batches = len(image)
    data_list = []
    pbar = tqdm(iterable=enumerate(image),
                total=total_batches,
                desc='Predicting')
    for i, (input, gt, size, name) in pbar:
        image_size = input.shape  # (N, C, H, W), e.g. (1, 3, 3072, 3328)
        overlap = 1 / 3  # each sliding window overlaps its neighbour by 1/3
        # print(image_size, tile_size)
        stride = ceil(tile_size[0] * (1 - overlap))  # sliding stride, e.g. ceil(512 * (1 - 1/3)) = 342
        tile_rows = int(ceil((image_size[2] - tile_size[0]) / stride) + 1)  # number of tile rows, e.g. (3072 - 512) / 342 + 1 = 9
        tile_cols = int(ceil((image_size[3] - tile_size[1]) / stride) + 1)  # number of tile columns, e.g. (3328 - 512) / 342 + 1 = 10
        full_probs = np.zeros((image_size[2], image_size[3], classes))  # accumulated class probabilities, shape (H, W, classes)
        count_predictions = np.zeros((image_size[2], image_size[3], classes))  # per-pixel prediction counts, shape (H, W, classes)

        for row in range(tile_rows):
            for col in range(tile_cols):
                x1 = int(col * stride)  # window start x
                y1 = int(row * stride)  # window start y
                x2 = min(x1 + tile_size[1], image_size[3])  # window end x, clipped to the image width
                y2 = min(y1 + tile_size[0], image_size[2])  # window end y, clipped to the image height
                x1 = max(int(x2 - tile_size[1]), 0)  # re-anchor the start so the window keeps its full size
                y1 = max(int(y2 - tile_size[0]), 0)

                img = input[:, :, y1:y2, x1:x2]  # image patch under the sliding window
                padded_img = pad_image(img, tile_size)  # pad so the patch is exactly tile_size (e.g. 512x512)
                # plt.imshow(padded_img)
                # plt.show()

                # feed the patch to the network, which outputs a probability map
                with torch.no_grad():
                    input_var = torch.from_numpy(padded_img).cuda().float()
                    padded_prediction = net(input_var)

                    if type(padded_prediction) is tuple:
                        padded_prediction = padded_prediction[0]

                    torch.cuda.synchronize()

                if isinstance(padded_prediction, list):
                    padded_prediction = padded_prediction[0]  # shape (1, classes, tile_h, tile_w)

                # move channels last: (tile_h, tile_w, classes)
                padded_prediction = padded_prediction.cpu().data[0].numpy().transpose(1, 2, 0)
                # crop back to the un-padded patch size
                prediction = padded_prediction[0:img.shape[2], 0:img.shape[3], :]
                count_predictions[y1:y2, x1:x2] += 1  # count this window's contribution
                full_probs[y1:y2, x1:x2] += prediction  # accumulate the window's probabilities

        # average the predictions in the overlapping regions
        full_probs /= count_predictions  # accumulated probabilities / counts = mean probability per pixel
        # visualize the normalization weights
        # plt.imshow(np.mean(count_predictions, axis=2))
        # plt.show()
        full_probs = np.asarray(np.argmax(full_probs, axis=2), dtype=np.uint8)
        # save the original image and the prediction in grayscale or color (see the save_predict flags below)
        gt = gt[0].numpy()
        # collect (ground truth, prediction) pairs for the mIoU computation
        data_list.append([gt.flatten(), full_probs.flatten()])
        save_predict(full_probs,
                     gt,
                     name[0],
                     args.dataset,
                     args.save_seg_dir,
                     output_grey=False,
                     output_color=True,
                     gt_color=True)

    meanIoU, per_class_iu = get_iou(data_list, args.classes)
    print('miou {}\nclass iou {}'.format(meanIoU, per_class_iu))
    result = args.save_seg_dir + '/results.txt'
    with open(result, 'w') as f:
        f.write(str(meanIoU))
        f.write('\n{}'.format(str(per_class_iu)))
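
predict_sliding relies on a pad_image helper to guarantee every crop matches tile_size before it is fed to the network. A minimal sketch, assuming the tensor-in / ndarray-out behaviour implied by the torch.from_numpy call above (the helper name and zero-padding choice are assumptions):

import numpy as np

def pad_image_sketch(img, tile_size):
    # img: (1, C, h, w) crop as a CPU tensor or ndarray; tile_size: (tile_h, tile_w).
    # Zero-pad on the bottom/right so the crop becomes exactly tile_h x tile_w.
    img = img.numpy() if hasattr(img, 'numpy') else np.asarray(img)
    pad_h = max(tile_size[0] - img.shape[2], 0)
    pad_w = max(tile_size[1] - img.shape[3], 0)
    return np.pad(img, ((0, 0), (0, 0), (0, pad_h), (0, pad_w)),
                  mode='constant', constant_values=0)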