Example #1
0
def eval_on_validation():
    if not os.path.exists(config.run_dir):
        os.mkdir(config.run_dir)
    model = getattr(models, config.model)()
    model = torch.nn.DataParallel(model)    # multi-gpu
    model.cuda()
    print 'test on validation set.', config.model
    print model
    if config.load_model_path:
        model.load_state_dict(torch.load(config.load_model_path))
    # data
    test_data = KITTIRoadFusion(config.root, split='val', num_features=19)
    test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False, num_workers=4)
    # test
    model.eval()
    eval_acc = 0
    eval_acc_cls = 0
    eval_mean_iu = 0
    eval_fwavacc = 0
    li_pred = []
    li_gt = []
    total_time = 0
    for i, (name, im, cloud, theta, shift,  lb) in enumerate(test_dataloader):
        im, cloud, theta, shift, lb = Variable(im), Variable(cloud), Variable(theta), Variable(shift), Variable(lb)
        im, cloud, theta, shift, lb = im.float().cuda(), cloud.float().cuda(), theta.float().cuda(), shift.float().cuda(), lb.long().cuda()
        start = time.clock()
        _, pred = model(im, cloud, theta, shift)  # inference
        end = time.clock()
        total_time += (end-start)
        # pred = F.upsample_bilinear(pred, scale_factor=4)
        # save image
        label_pred = pred.data.cpu().numpy().squeeze()
        label_pred = np.array(label_pred*255, dtype=np.uint8)
        filename = os.path.join(config.run_dir, name[0])
        print filename
        # cv2.imwrite(filename, label_pred)
        # Mean IoU
        label_true = lb.data.cpu().numpy().astype(np.int8)
        label_pred = pred.data.cpu().numpy().squeeze(0)
        label_pred = (label_pred > 0.5).astype(np.int8)
        for (label, prob) in zip(label_true, label_pred):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(label, prob, n_class=2)
            eval_acc += acc
            eval_acc_cls += acc_cls
            eval_mean_iu += mean_iu
            eval_fwavacc += fwavacc
        # MaxF
        label_pred = pred.data.cpu().numpy().squeeze()
        label_true = lb.data.cpu().numpy().squeeze()
        li_pred.append(label_pred)
        li_gt.append(label_true)

    print 'Runtime ############# time(s) : %f ##########' % (total_time / test_dataloader.__len__())
    print 'Validation ======ACC: %lf,Mean IoU: %lf======' % (eval_acc / test_dataloader.__len__(),
                                                             eval_mean_iu / test_dataloader.__len__())
    eval_road(li_pred, li_gt)
Example #2
0
def eval_on_validation_bev():
    if not os.path.exists(config.run_dir):
        os.mkdir(config.run_dir)
    model = getattr(models, config.model)()
    model = torch.nn.DataParallel(model)  # multi-gpu
    model.cuda()
    print 'test on validation set.', config.model
    print model
    if config.load_model_path:
        model.load_state_dict(torch.load(config.load_model_path))
    # data
    bev = BirdsEyeView()
    test_data = KITTIRoadFusion(config.root, split='val', num_features=19, return_bev=True)
    test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False, num_workers=4)
    # test
    model.eval()
    eval_acc = 0
    eval_acc_cls = 0
    eval_mean_iu = 0
    eval_fwavacc = 0
    li_pred = []
    li_gt = []
    for i, (name, im, cloud, theta, shift, _, lb) in enumerate(test_dataloader):
        im, cloud, theta, shift, lb = Variable(im), Variable(cloud), Variable(theta), Variable(shift), Variable(lb)
        im, cloud, theta, shift, lb = im.float().cuda(), cloud.float().cuda(), theta.float().cuda(), shift.float().cuda(), lb.long().cuda()
        _, pred = model(im, cloud, theta, shift)  # inference

        pred = pred.data.cpu().numpy().squeeze()
        theta = theta.data.cpu().numpy().squeeze()
        shift = shift.data.cpu().numpy().squeeze()

        label_pred = bev.transformLable2BEV((pred*255).astype(np.uint8), theta, shift)
        label_true = lb.data.cpu().numpy().squeeze()

        # save image
        filename = os.path.join(config.run_dir, name[0])
        print filename
        cv2.imwrite(filename, label_pred)

        label_pred = label_pred/255.
        # Mean IoU
        label_true_hard = np.expand_dims(label_true.astype(np.int8), axis=0)
        label_pred_hard = np.expand_dims((label_pred > 0.5).astype(np.int8), axis=0)
        for (label, prob) in zip(label_true_hard, label_pred_hard):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(label, prob, n_class=2)
            eval_acc += acc
            eval_acc_cls += acc_cls
            eval_mean_iu += mean_iu
            eval_fwavacc += fwavacc
        # MaxF
        li_pred.append(label_pred)
        li_gt.append(label_true)
    print 'Validation ======ACC: %lf,Mean IoU: %lf======' % (eval_acc / test_dataloader.__len__(),
                                                             eval_mean_iu / test_dataloader.__len__())
    eval_road(li_pred, li_gt)
Example #3
0
def val(model, dataloader):
    model.eval()
    eval_acc = 0
    eval_acc_cls = 0
    eval_mean_iu = 0
    eval_fwavacc = 0
    li_pred = []
    li_gt = []
    for i, (_, im, cloud, theta, shift, lb) in enumerate(dataloader):
        im, cloud, theta, shift, lb = Variable(im), Variable(cloud), Variable(theta), Variable(shift), Variable(lb)
        im, cloud, theta, shift, lb = im.float().cuda(), cloud.float().cuda(), theta.float().cuda(), shift.float().cuda(), lb.long().cuda()
        _, pred = model(im, cloud, theta, shift)
        # Mean IoU
        label_true = lb.data.cpu().numpy().astype(np.int8)
        label_pred = pred.data.cpu().numpy().squeeze(0)
        label_pred = (label_pred > 0.5).astype(np.int8)
        for (label, prob) in zip(label_true, label_pred):
            acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(label, prob, n_class=2)
            eval_acc += acc
            eval_acc_cls += acc_cls
            eval_mean_iu += mean_iu
            eval_fwavacc += fwavacc
        # MaxF
        label_pred = pred.data.cpu().numpy().squeeze()
        label_true = lb.data.cpu().numpy().squeeze()
        li_pred.append(label_pred)
        li_gt.append(label_true)
    print 'Validation ======ACC: %lf,Mean IoU: %lf======' % (eval_acc/dataloader.__len__(),
                                                             eval_mean_iu/dataloader.__len__())
    max_f = eval_road(li_pred, li_gt)
    model.train()
    return max_f