Example no. 1
0
 def visualize_image(self, writer, dataset, image, target, output,
                     global_step):
     """Log input images, predicted masks, and ground-truth masks to TensorBoard.

     Only the first three samples of the batch are visualized; predictions
     and targets are decoded to RGB color maps before logging.
     """
     # Input images: grid of the first three samples, value-normalized.
     img_grid = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
     writer.add_image('Image', img_grid, global_step)

     # Predictions: argmax over the class dimension, then decode to RGB.
     pred_maps = torch.max(output[:3], 1)[1].detach().cpu().numpy()
     pred_grid = make_grid(decode_seg_map_sequence(pred_maps, dataset=dataset),
                           3, normalize=False, range=(0, 255))
     writer.add_image('Predicted label', pred_grid, global_step)

     # Ground truth: drop the channel dimension, then decode to RGB.
     gt_maps = torch.squeeze(target[:3], 1).detach().cpu().numpy()
     gt_grid = make_grid(decode_seg_map_sequence(gt_maps, dataset=dataset),
                         3, normalize=False, range=(0, 255))
     writer.add_image('Groundtruth label', gt_grid, global_step)
Example no. 2
0
def main():
    """Run single-image semantic-segmentation inference with a trained FPN.

    Loads a hard-coded checkpoint, reads ``./s1.jpeg``, predicts a per-pixel
    class map and saves the colorized result to ``./testjpg.png``.

    Raises:
        NotImplementedError: if ``args.dataset`` or ``args.net`` is not one of
            the supported values (fail fast instead of a later NameError).
        RuntimeError: if the checkpoint file does not exist.
    """
    args = parse_args()

    # Resolve the number of classes; the original fell through silently and
    # crashed with a NameError on `num_class` for unknown datasets.
    if args.dataset == 'Cityscapes':
        num_class = 19
    else:
        raise NotImplementedError(
            'dataset {} not supported'.format(args.dataset))

    # Build the backbone; same fail-fast treatment for `model`.
    if args.net == 'resnet101':
        blocks = [2, 4, 23, 3]
        model = FPN(blocks, num_class, back_bone=args.net)
    else:
        raise NotImplementedError('network {} not supported'.format(args.net))

    if args.checkname is None:
        args.checkname = 'fpn-' + str(args.net)

    # Trained model path (hard-coded checkpoint location).
    load_name = os.path.join(
        r'/home/home_data/zjw/SemanticSegmentationUsingFPN_PanopticFeaturePyramidNetworks-master/run/Cityscapes/fpn-resnet101/model_best.pth.tar'
    )

    # Load trained model weights.
    if not os.path.isfile(load_name):
        raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
    print('====>loading trained model from ' + load_name)
    # map_location keeps GPU-trained checkpoints loadable on CPU-only hosts;
    # the original if/else executed the identical statement in both branches.
    checkpoint = torch.load(
        load_name, map_location=None if args.cuda else 'cpu')
    model.load_state_dict(checkpoint['state_dict'])

    # Read the test image as RGB.
    # NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; switch to
    # imageio.imread (same signature) when upgrading SciPy.
    img_path = r'./s1.jpeg'
    image = scipy.misc.imread(img_path, mode='RGB')

    # RGB -> BGR, HWC -> CHW, then add a batch dimension.
    image = image[:, :, ::-1]
    image = np.transpose(image, (2, 0, 1))
    image = torch.from_numpy(image.copy()).float().unsqueeze(0)

    if args.cuda:
        image, model = image.cuda(), model.cuda()
    with torch.no_grad():
        output = model(image)

    # Per-pixel class prediction: argmax over the class dimension.
    pred = np.argmax(output.data.cpu().numpy(), axis=1)

    # Colorize and save the result.
    pred_rgb = decode_seg_map_sequence(pred, args.dataset, args.plot)
    save_image(pred_rgb, r'./testjpg.png')
Example no. 3
0
def main():
    """Evaluate a trained FPN checkpoint on the validation split.

    Prints pixel accuracy, per-class accuracy, mIoU and FWIoU for the
    dataset selected by ``args.dataset`` (CamVid or Cityscapes).

    Raises:
        RuntimeError: for an unsupported dataset or a missing checkpoint.
        NotImplementedError: for an unsupported backbone network.
    """
    args = parse_args()

    # Number of classes per dataset; reject unknown datasets up front
    # instead of crashing later with a NameError on `num_class`.
    if args.dataset == 'CamVid':
        num_class = 32
    elif args.dataset == 'Cityscapes':
        num_class = 19
    else:
        raise RuntimeError("dataset {} not found.".format(args.dataset))

    if args.net == 'resnet101':
        blocks = [2, 4, 23, 3]
        model = FPN(blocks, num_class, back_bone=args.net)
    else:
        raise NotImplementedError('network {} not supported'.format(args.net))

    if args.checkname is None:
        args.checkname = 'fpn-' + str(args.net)

    evaluator = Evaluator(num_class)

    # Trained model path and name.
    experiment_dir = args.experiment_dir
    load_name = os.path.join(experiment_dir, 'checkpoint.pth.tar')

    # Load trained model weights.
    if not os.path.isfile(load_name):
        raise RuntimeError("=> no checkpoint found at '{}'".format(load_name))
    print('====>loading trained model from ' + load_name)
    # map_location keeps GPU-trained checkpoints loadable on CPU-only hosts;
    # the original if/else executed the identical statement in both branches.
    checkpoint = torch.load(
        load_name, map_location=None if args.cuda else 'cpu')
    model.load_state_dict(checkpoint['state_dict'])

    # Build the evaluation loader. BUG FIX: the original created
    # `test_loader` on the CamVid branch but the loop below iterated
    # `val_loader`, raising a NameError for CamVid; both branches now
    # assign the single name `eval_loader`.
    if args.dataset == 'CamVid':
        root_dir = Path.db_root_dir('CamVid')
        test_file = os.path.join(root_dir, "val.csv")
        test_data = CamVidDataset(csv_file=test_file, phase='val')
        eval_loader = torch.utils.data.DataLoader(
            test_data, batch_size=args.batch_size, shuffle=False,
            num_workers=args.num_workers)
    else:  # Cityscapes — any other value was rejected above
        kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
        # make_data_loader returns (train, val, test, nclass); evaluate on val
        # as the original did.
        _, eval_loader, _, _ = make_data_loader(args, **kwargs)

    # Evaluate every batch.
    results = []
    for _, batch in enumerate(eval_loader):
        if args.dataset == 'CamVid':
            image, target = batch['X'], batch['l']
        else:
            image, target = batch['image'], batch['label']

        if args.cuda:
            image, target, model = image.cuda(), target.cuda(), model.cuda()
        with torch.no_grad():
            output = model(image)
        pred = np.argmax(output.data.cpu().numpy(), axis=1)
        target = target.cpu().numpy()
        evaluator.add_batch(target, pred)

        # Keep the colorized predictions (decode may also plot them).
        pred_rgb = decode_seg_map_sequence(pred, args.dataset, args.plot)
        results.append(pred_rgb)

    Acc = evaluator.Pixel_Accuracy()
    Acc_class = evaluator.Pixel_Accuracy_Class()
    mIoU = evaluator.Mean_Intersection_over_Union()
    FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()

    print('Mean evaluate result on dataset {}'.format(args.dataset))
    print('Acc:{:.3f}\tAcc_class:{:.3f}\nmIoU:{:.3f}\tFWIoU:{:.3f}'.format(
        Acc, Acc_class, mIoU, FWIoU))
Example no. 4
0
# Single-image inference script for a DANet segmentation model.
# `pic`, `Normalize`, and `Image` come from earlier in the file (not shown).
pic = pic.resize((1024,512),Image.BILINEAR)  # fixed W x H expected by the net
print('pic shape:{}'.format(pic.size))

pic = np.array(pic)
pic = Normalize(pic)  # project helper — presumably mean/std scaling; confirm

# HWC -> CHW, float tensor, add batch dimension.
pic = np.transpose(pic,(2,0,1))
pic = torch.from_numpy(pic.copy()).float()
pic = pic.unsqueeze(0)


device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
pic = pic.to(device)


# Loads the entire pickled model object (not just a state_dict).
# NOTE(review): torch.load unpickles arbitrary code — only load trusted files.
danet = torch.load('./danet.pth')
danet = danet.to(device)
danet = danet.eval()


# DANet returns three heads: fused output plus the two attention branches
# (indices 1 and 2 — presumably position / channel attention; verify).
out = danet(pic)
out_all = out[0]
out_p = out[1]
out_c = out[2]

# Per-pixel class map from the fused head, colorized and saved.
out = out_all.data.cpu().numpy()
out = np.argmax(out,axis=1)
pre = decode_seg_map_sequence(out, plot=True)
save_image(pre,r'./testjpg.png')