Example No. 1
def validate(args):

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path,
                         split=args.split,
                         is_transform=True,
                         img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader,
                                batch_size=args.batch_size,
                                num_workers=4)

    # Setup Model
    model = torch.load(args.model_path)
    model.eval()

    if torch.cuda.is_available():
        model.cuda(args.gpu)

    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        if torch.cuda.is_available():
            images = Variable(images.cuda(args.gpu))
            labels = Variable(labels.cuda(args.gpu))
        else:
            images = Variable(images)
            labels = Variable(labels)

        outputs = model(images)
        pred = outputs.data.max(1)[1].cpu().numpy()
        pred = np.squeeze(pred)
        # cv2.resize does not accept int64 label maps, so resize as float32
        # with nearest-neighbour interpolation and cast back afterwards.
        pred = cv2.resize(pred.astype(np.float32),
                          labels.size()[1:][::-1],
                          interpolation=cv2.INTER_NEAREST).astype(np.int64)
        pred = np.expand_dims(pred, axis=0)
        gt = labels.data.cpu().numpy()

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    score, class_iou = scores(gts, preds, n_class=n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
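
All of the validate(args) functions on this page read their configuration from an args namespace. A minimal sketch of how Example No. 1 could be driven from the command line is shown below; the flag names mirror the attributes the function reads (model_path, dataset, split, img_rows, img_cols, batch_size, gpu), while the defaults are illustrative assumptions rather than the original project's values.

# Hypothetical CLI wiring for validate(args) from Example No. 1.
# The project helpers (get_loader, get_data_path, scores) are assumed
# to be importable from the surrounding code base.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate a semantic segmentation model')
    parser.add_argument('--model_path', required=True, help='Path to a model saved with torch.save')
    parser.add_argument('--dataset', default='pascal', help='Dataset name passed to get_loader/get_data_path')
    parser.add_argument('--split', default='val', help='Dataset split to evaluate')
    parser.add_argument('--img_rows', type=int, default=256, help='Input image height')
    parser.add_argument('--img_cols', type=int, default=256, help='Input image width')
    parser.add_argument('--batch_size', type=int, default=1, help='Validation batch size')
    parser.add_argument('--gpu', type=int, default=0, help='CUDA device id to use')
    args = parser.parse_args()
    validate(args)
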
Example No. 2
def validate(args):

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, img_size=args.img_size)

    n_classes = loader.n_classes
    n_channels = loader.n_channels

    valloader = data.DataLoader(loader,
                                batch_size=args.batch_size,
                                num_workers=4,
                                shuffle=True)

    # Setup Model
    model = torch.load(args.model_path)
    model.eval()

    if torch.cuda.is_available():
        model.cuda(0)

    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        if i >= args.max_samples:
            break
        if torch.cuda.is_available():
            images = Variable(images.cuda(0))
            labels = Variable(labels.cuda(0))
        else:
            images = Variable(images)
            labels = Variable(labels)

        outputs = model(images)

        pred = np.squeeze((torch.max(outputs.data, 1,
                                     keepdim=True))[1].cpu().numpy())
        gt = np.squeeze(labels.data.cpu().numpy())

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    score, class_iou = scores(gts, preds, n_class=n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
Example No. 3
def validate(args):

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path,
                         split=args.split,
                         is_transform=True,
                         img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader,
                                batch_size=args.batch_size,
                                num_workers=4)

    # Setup Model
    model = torch.load(args.model_path)
    model.eval()

    # Wrap the model for multi-GPU inference once, outside the batch loop;
    # re-wrapping on every iteration would nest DataParallel modules.
    if torch.cuda.is_available():
        model.cuda(0)
        model = torch.nn.DataParallel(
            model, device_ids=range(torch.cuda.device_count()))

    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        if torch.cuda.is_available():
            images = Variable(images.cuda(0))
            labels = Variable(labels.cuda(0))
        else:
            images = Variable(images)
            labels = Variable(labels)

        outputs = model(images)
        pred = np.squeeze(outputs.data.max(1)[1].cpu().numpy(), axis=1)
        gt = labels.data.cpu().numpy()

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    score, class_iou = scores(gts, preds, n_class=n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
Example No. 4
def eval_metrics(args, gts, preds, verbose=True):

    classes = [
        'Sky', 'Building', 'Column-Pole', 'Road', 'Sidewalk', 'Tree',
        'Sign-Symbol', 'Fence', 'Car', 'Pedestrian', 'Bicyclist'
    ]
    class_order = [1, 5, 0, 8, 6, 3, 9, 7, 2, 4,
                   10]  # class order used in the paper

    score, class_iou, class_acc = scores(gts, preds, args.n_classes)
    results_str = ''

    for k, v in score.items():
        if verbose:
            print(k, v)
        results_str += str(k) + ': ' + str(v) + '\n'

    if verbose:
        print('class iou:')
    results_str += '\nclass iou:\n'
    for i in range(len(classes)):
        if verbose:
            print(classes[class_order[i]], class_iou[class_order[i]])
        results_str += str(class_iou[class_order[i]]) + ' '
    if verbose:
        print('class acc:')
    results_str += '\n\nclass acc:\n'
    for i in range(len(classes)):
        if verbose:
            print(classes[class_order[i]], class_acc[class_order[i]])
        results_str += str(class_acc[class_order[i]]) + ' '
    results_str += '\n'

    if args.save_output:
        print('Saving results to', args.out_dir)
        with open(os.path.join(args.out_dir, 'results.txt'), 'w') as f:
            f.write(results_str)

    return results_str, score
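
Every example on this page relies on a scores(gts, preds, n_class) helper to turn the accumulated ground-truth and prediction arrays into segmentation metrics, but its implementation is not shown here. The sketch below is one common confusion-matrix formulation (overall pixel accuracy, mean class accuracy, frequency-weighted accuracy, mean IoU and per-class IoU); the metric names in the returned dictionary are assumptions, and Example No. 4 expects a variant that additionally returns per-class accuracy.

import numpy as np

def scores(label_trues, label_preds, n_class):
    # Assumed confusion-matrix implementation of the metrics helper used
    # by the examples above; not the original source.
    hist = np.zeros((n_class, n_class))
    for lt, lp in zip(label_trues, label_preds):
        mask = (lt >= 0) & (lt < n_class)
        hist += np.bincount(
            n_class * lt[mask].astype(int) + lp[mask].astype(int),
            minlength=n_class ** 2).reshape(n_class, n_class)

    acc = np.diag(hist).sum() / hist.sum()        # overall pixel accuracy
    acc_cls = np.diag(hist) / hist.sum(axis=1)    # per-class accuracy
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    freq = hist.sum(axis=1) / hist.sum()          # per-class pixel frequency
    fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()

    score = {'Overall Acc': acc,
             'Mean Acc': np.nanmean(acc_cls),
             'FreqW Acc': fwavacc,
             'Mean IoU': np.nanmean(iu)}
    class_iou = dict(zip(range(n_class), iu))
    return score, class_iou
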
Example No. 5
def validate(args):
    ###################
    supervised = args.supervised  # Boolean flag; True when the option is set on the command line.
    ###################

    ############################################
    if supervised:
        # When pre_trained = 'gt', i.e. when using supervised ImageNet weights, images are normalized with the ImageNet mean.
        image_transform = None
    else:
        # When pre_trained = 'self' or 'no', i.e. in the self-supervised or unsupervised case, the input images are normalized as follows:
        image_transform = transforms.Compose([transforms.ToTensor(),
                                              transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path, split=args.split, is_transform=True,
                         img_size=(args.img_rows, args.img_cols), image_transform=image_transform)
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader, batch_size=args.batch_size, num_workers=4)

    ############################################
    # Load the trained Model
    assert(args.netF != '' and args.netS != '')
    print('\n' + '-' * 60)
    netF = torch.load(args.netF)
    netS = torch.load(args.netS)
    print('Loading the trained networks for evaluation.')
    print('-' * 60)

    ############################################
    padder = padder_layer(pad_size=100)
    netF.eval()  # eval() must be called on the networks during the evaluation phase
    netS.eval()
    padder.eval()

    ############################################
    # Porting the networks to CUDA
    if torch.cuda.is_available():
        netF.cuda(args.gpu)
        netS.cuda(args.gpu)
        padder.cuda(args.gpu)

    ############################################
    # Evaluation:
    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        ######################
        # Porting the data to Autograd variables and CUDA (if available)
        if torch.cuda.is_available():
            images = Variable(images.cuda(args.gpu))
            labels = Variable(labels.cuda(args.gpu))
        else:
            images = Variable(images)
            labels = Variable(labels)

        ######################
        # Passing the data through the networks and Computing the score maps
        padded_images = padder(images)
        feature_maps = netF(padded_images)
        score_maps = netS(feature_maps)
        outputs = F.upsample(score_maps, labels.size()[1:], mode='bilinear')
        pred = outputs.data.max(1)[1].cpu().numpy()

        gt = labels.data.cpu().numpy()
        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    ######################
    # Computing the mean-IoU and other scores
    score, class_iou = scores(gts, preds, n_class=n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
Example No. 6
def validate(args):

    # Setup Dataloader
    data_loader = get_loader(args.dataset)
    data_path = get_data_path(args.dataset)
    loader = data_loader(data_path,
                         split=args.split,
                         is_transform=True,
                         img_size=(args.img_rows, args.img_cols))
    n_classes = loader.n_classes
    valloader = data.DataLoader(loader,
                                batch_size=args.batch_size,
                                num_workers=4)

    # Setup Model
    model = torch.load(args.model_path)
    model.eval()

    # Setup visdom for visualization
    vis = visdom.Visdom(port=VISDOM_PORT)

    if torch.cuda.is_available():
        model.cuda(CUDA_ID)

    gts, preds = [], []
    for i, (images, labels) in tqdm(enumerate(valloader)):
        if torch.cuda.is_available():
            images = Variable(images.cuda(CUDA_ID))
            labels = Variable(labels.cuda(CUDA_ID))
        else:
            images = Variable(images)
            labels = Variable(labels)

        outputs = model(images)
        pred = outputs.data.max(1)[1].cpu().numpy()
        gt = labels.data.cpu().numpy()
        if i % 50 == 0:
            # prediction mask
            vis.images(
                np.expand_dims(pred, axis=1),
                opts=dict(title='Prediction masks', caption='...'),
            )
            # target mask
            vis.images(
                np.expand_dims(gt, axis=1),
                opts=dict(title='Target masks', caption='...'),
            )

            #colored_target = loader.decode_segmap(gt[0])
            #colored_target = np.transpose(colored_target, [2,0,1])
            #vis.images(colored_target, opts=dict(title='Target masks', caption='...'),)

        for gt_, pred_ in zip(gt, pred):
            gts.append(gt_)
            preds.append(pred_)

    score, class_iou = scores(gts, preds, n_class=n_classes)

    for k, v in score.items():
        print(k, v)

    for i in range(n_classes):
        print(i, class_iou[i])
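
All six snippets target the pre-0.4 PyTorch API (Variable, .data, model.cuda(gpu_id)). As a rough guide only, the same evaluation loop under current PyTorch would drop Variable, move tensors with .to(device) and disable gradients explicitly; the sketch below assumes the same data loader and scores helper as the examples above.

import torch

def validate_modern(model, valloader, n_classes):
    # Sketch of the evaluation loop for PyTorch >= 1.0 (an assumption,
    # not part of the original examples).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device).eval()

    gts, preds = [], []
    with torch.no_grad():
        for images, labels in valloader:
            outputs = model(images.to(device))
            pred = outputs.argmax(dim=1).cpu().numpy()
            gts.extend(labels.numpy())
            preds.extend(pred)

    score, class_iou = scores(gts, preds, n_class=n_classes)
    return score, class_iou
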