Example #1
def main():
    args = parser.parse_args()

    normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                            std=[0.5, 0.5, 0.5])
    valid_transform = custom_transforms.Compose(
        [custom_transforms.ArrayToTensor(), normalize])
    val_set = SequenceFolder(args.data,
                             transform=valid_transform,
                             seed=args.seed,
                             sequence_length=args.sequence_length)

    print('{} samples found in {} valid scenes'.format(len(val_set),
                                                       len(val_set.scenes)))
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    dpsnet = PSNet(args.nlabel, args.mindepth).cuda()
    weights = torch.load(args.pretrained_dps)
    dpsnet.load_state_dict(weights['state_dict'])
    dpsnet.eval()

    output_dir = Path(args.output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    # one error column per printed batch; ceil avoids a trailing all-zero
    # column that would bias the mean when print_freq divides len(val_loader)
    errors = np.zeros((2, 8, int(np.ceil(len(val_loader) / args.print_freq))),
                      np.float32)
    with torch.no_grad():
        for ii, (tgt_img, ref_imgs, ref_poses, intrinsics, intrinsics_inv,
                 tgt_depth, scale_) in enumerate(val_loader):
            if ii % args.print_freq == 0:
                i = int(ii / args.print_freq)
                # Variable is deprecated since PyTorch 0.4; tensors work directly
                tgt_img_var = tgt_img.cuda()
                ref_imgs_var = [img.cuda() for img in ref_imgs]
                ref_poses_var = [pose.cuda() for pose in ref_poses]
                intrinsics_var = intrinsics.cuda()
                intrinsics_inv_var = intrinsics_inv.cuda()
                tgt_depth_var = tgt_depth.cuda()
                scale = scale_.numpy()[0]

                # compute output
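                # stack the per-reference-view relative poses along dim 1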
                pose = torch.cat(ref_poses_var, 1)
                start = time.time()
                output_depth = dpsnet(tgt_img_var, ref_imgs_var, pose,
                                      intrinsics_var, intrinsics_inv_var)
                elps = time.time() - start
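                # valid-pixel mask: clamp depth to [mindepth, maxdepth] and
                # drop NaNs (x == x is False only for NaN)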
                mask = (tgt_depth <= args.maxdepth) & (
                    tgt_depth >= args.mindepth) & (tgt_depth == tgt_depth)

                # inverse-depth (plane-sweep) parameterization:
                # disparity = mindepth * nlabel / depth, so the nlabel sweep
                # planes span depths [mindepth, mindepth * nlabel]
                tgt_disp = args.mindepth * args.nlabel / tgt_depth
                output_disp = args.mindepth * args.nlabel / output_depth

                output_disp_ = torch.squeeze(output_disp.data.cpu(), 1)
                output_depth_ = torch.squeeze(output_depth.data.cpu(), 1)

                errors[0, :,
                       i] = compute_errors_test(tgt_depth[mask] / scale,
                                                output_depth_[mask] / scale)
                errors[1, :,
                       i] = compute_errors_test(tgt_disp[mask] / scale,
                                                output_disp_[mask] / scale)

                print('Elapsed Time {} Abs Error {:.4f}'.format(
                    elps, errors[0, 0, i]))

                if args.output_print:
                    output_disp_n = output_disp_.numpy()[0]
                    np.save(output_dir / '{:04d}.npy'.format(i), output_disp_n)
                    disp = (255 * tensor2array(torch.from_numpy(output_disp_n),
                                               max_value=args.nlabel,
                                               colormap='bone')).astype(np.uint8)
                    # (Example #4 transposes CHW -> HWC before imsave; the same
                    # would be needed here if tensor2array returns CHW)
                    imsave(output_dir / '{:04d}_disp.png'.format(i), disp)

    mean_errors = errors.mean(2)
    error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3'
    ]
    print("{}".format(args.output_dir))
    print("Depth Results : ")
    print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".
          format(*error_names))
    print(
        "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}"
        .format(*mean_errors[0]))

    print("Disparity Results : ")
    print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".
          format(*error_names))
    print(
        "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}"
        .format(*mean_errors[1]))

    np.savetxt(output_dir / 'errors.csv',
               mean_errors,
               fmt='%1.4f',
               delimiter=',')
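
# Note: compute_errors_test is project-specific and not shown in these
# examples. A minimal sketch of the eight standard depth metrics it is
# assumed to return, in the error_names order used above (abs_rel, abs_diff,
# sq_rel, rms, log_rms, a1, a2, a3); the *_sketch name marks this as an
# illustration, not the project's implementation:
import torch


def compute_errors_test_sketch(gt, pred):
    # gt, pred: 1-D tensors of valid (masked) depth or disparity values
    thresh = torch.max(gt / pred, pred / gt)
    a1 = (thresh < 1.25).float().mean()
    a2 = (thresh < 1.25 ** 2).float().mean()
    a3 = (thresh < 1.25 ** 3).float().mean()
    abs_rel = torch.mean(torch.abs(gt - pred) / gt)
    abs_diff = torch.mean(torch.abs(gt - pred))
    sq_rel = torch.mean((gt - pred) ** 2 / gt)
    rms = torch.sqrt(torch.mean((gt - pred) ** 2))
    log_rms = torch.sqrt(torch.mean((torch.log(gt) - torch.log(pred)) ** 2))
    return abs_rel, abs_diff, sq_rel, rms, log_rms, a1, a2, a3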
Example #2
def validate_with_gt(args, val_loader, mvdnet, epoch, output_writers=None):
    batch_time = AverageMeter()
    error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'a1', 'a2', 'a3', 'mean_angle'
    ]
    test_error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3',
        'mean_angle'
    ]
    errors = AverageMeter(i=len(error_names))
    test_errors = AverageMeter(i=len(test_error_names))
    log_outputs = bool(output_writers)

    output_dir = Path(args.output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    # switch to evaluate mode
    mvdnet.eval()

    end = time.time()
    with torch.no_grad():
        for i, (tgt_img, ref_imgs, gt_nmap, ref_poses, intrinsics,
                intrinsics_inv, tgt_depth) in enumerate(val_loader):
            tgt_img_var = tgt_img.cuda()
            ref_imgs_var = [img.cuda() for img in ref_imgs]
            gt_nmap_var = gt_nmap.cuda()
            ref_poses_var = [pose.cuda() for pose in ref_poses]
            intrinsics_var = intrinsics.cuda()
            intrinsics_inv_var = intrinsics_inv.cuda()
            tgt_depth_var = tgt_depth.cuda()

            pose = torch.cat(ref_poses_var, 1)

            # skip samples whose relative poses contain NaNs (x != x only for NaN)
            if (pose != pose).any():
                continue

            if args.dataset == 'sceneflow':
                # SceneFlow is rendered with a 1050 px focal length; rescale
                # the depth range by the actual focal (and the global args.scale)
                factor = (1.0 / args.scale) * intrinsics_var[:, 0, 0] / 1050.0
                factor = factor.view(-1, 1, 1)
            else:
                factor = torch.ones(
                    (tgt_depth_var.size(0), 1, 1)).type_as(tgt_depth_var)

            # valid-pixel mask: depth range scaled by factor; the last term drops NaNs
            mask = ((tgt_depth_var <= args.nlabel * args.mindepth * factor * 3)
                    & (tgt_depth_var >= args.mindepth * factor)
                    & (tgt_depth_var == tgt_depth_var))

            if not mask.any():
                continue

            output_depth, nmap = mvdnet(tgt_img_var,
                                        ref_imgs_var,
                                        pose,
                                        intrinsics_var,
                                        intrinsics_inv_var,
                                        factor=factor.unsqueeze(1))
            output_disp = args.nlabel * args.mindepth / output_depth
            if args.dataset == 'sceneflow':
                output_disp = (args.nlabel * args.mindepth) * 3 / output_depth
                output_depth = (args.nlabel * 3) * (args.mindepth *
                                                    factor) / output_disp

            tgt_disp_var = ((1.0 / args.scale) *
                            intrinsics_var[:, 0, 0].view(-1, 1, 1) /
                            tgt_depth_var)

            if args.dataset == 'sceneflow':
                output = torch.squeeze(output_disp.data.cpu(), 1)
                errors_ = compute_errors_train(tgt_disp_var.cpu(), output,
                                               mask)
                test_errors_ = list(
                    compute_errors_test(tgt_disp_var.cpu()[mask],
                                        output[mask]))
            else:
                output = torch.squeeze(output_depth.data.cpu(), 1)
                errors_ = compute_errors_train(tgt_depth, output, mask)
                test_errors_ = list(
                    compute_errors_test(tgt_depth[mask], output[mask]))

            # normals are evaluated only where the GT normal map is non-zero
            n_mask = (gt_nmap_var.permute(0, 2, 3, 1)[0, :, :] != 0)
            n_mask = n_mask[:, :, 0] | n_mask[:, :, 1] | n_mask[:, :, 2]
            total_angles_m = compute_angles(
                gt_nmap_var.permute(0, 2, 3, 1)[0], nmap[0])

            mask_angles = total_angles_m[n_mask]
            total_angles_m[~n_mask] = 0
            errors_.append(torch.mean(mask_angles).item())
            test_errors_.append(torch.mean(mask_angles).item())
            errors.update(errors_)
            test_errors.update(test_errors_)
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if args.output_print:
                np.save(output_dir / '{:04d}_depth.npy'.format(i),
                        output.numpy()[0])
                plt.imsave(output_dir / '{:04d}_gt.png'.format(i),
                           tgt_depth.numpy()[0],
                           cmap='rainbow')
                imsave(output_dir / '{:04d}_aimage.png'.format(i),
                       np.transpose(tgt_img.numpy()[0], (1, 2, 0)))
                np.save(output_dir / '{:04d}_cam.npy'.format(i),
                        intrinsics_var.cpu().numpy()[0])
                np.save(output_dir / '{:04d}_normal.npy'.format(i),
                        nmap.cpu().numpy()[0])

            if i % args.print_freq == 0:
                print(
                    'valid: Time {} Abs Error {:.4f} ({:.4f}) Abs angle Error {:.4f} ({:.4f}) Iter {}/{}'
                    .format(batch_time, test_errors.val[0], test_errors.avg[0],
                            test_errors.val[-1], test_errors.avg[-1], i,
                            len(val_loader)))
    if args.output_print:
        np.savetxt(output_dir / (args.ttype + 'errors.csv'),
                   test_errors.avg,
                   fmt='%1.4f',
                   delimiter=',')
        # note: this writes the same averaged metrics again under a second name
        np.savetxt(output_dir / (args.ttype + 'angle_errors.csv'),
                   test_errors.avg,
                   fmt='%1.4f',
                   delimiter=',')
    return errors.avg, error_names
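
# Note: AverageMeter is project-specific. A minimal sketch consistent with its
# use above (i values tracked at once, exposing .val / .avg lists and a
# printable "last (mean)" form); an illustration, not the project's class:
class AverageMeterSketch(object):
    def __init__(self, i=1):
        self.i = i
        self.val = [0.0] * i
        self.avg = [0.0] * i
        self.count = 0

    def update(self, val, n=1):
        # accepts a scalar when i == 1, otherwise a sequence of i values
        val = [float(val)] if self.i == 1 else [float(v) for v in val]
        self.count += n
        # incremental weighted mean: avg += (val - avg) * n / count
        self.avg = [a + (v - a) * n / self.count
                    for a, v in zip(self.avg, val)]
        self.val = val

    def __str__(self):
        return ' '.join('{:.3f} ({:.3f})'.format(v, a)
                        for v, a in zip(self.val, self.avg))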
Example #3
def validate_with_gt(args,
                     val_loader,
                     mvdnet,
                     depth_cons,
                     epoch,
                     output_writers=None):
    batch_time = AverageMeter()
    error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'a1', 'a2', 'a3', 'mean_angle'
    ]
    test_error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3',
        'mean_angle'
    ]
    errors = AverageMeter(i=len(error_names))
    test_errors = AverageMeter(i=len(test_error_names))
    # the pre-refinement ("prev") metrics share the same layout
    test_errors1 = AverageMeter(i=len(test_error_names))
    log_outputs = bool(output_writers)

    output_dir = Path(args.output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    # switch to evaluate mode (mvdnet runs in both paths below)
    mvdnet.eval()
    if args.train_cons:
        depth_cons.eval()

    end = time.time()
    with torch.no_grad():
        for i, (tgt_img, ref_imgs, gt_nmap, ref_poses, intrinsics,
                intrinsics_inv, tgt_depth) in enumerate(val_loader):
            tgt_img_var = tgt_img.cuda()
            ref_imgs_var = [img.cuda() for img in ref_imgs]
            gt_nmap_var = gt_nmap.cuda()
            ref_poses_var = [pose.cuda() for pose in ref_poses]
            intrinsics_var = intrinsics.cuda()
            intrinsics_inv_var = intrinsics_inv.cuda()
            tgt_depth_var = tgt_depth.cuda()

            pose = torch.cat(ref_poses_var, 1)
            if (pose != pose).any():
                continue

            outputs = mvdnet(tgt_img_var, ref_imgs_var, pose, intrinsics_var,
                             intrinsics_inv_var)
            output_depth = outputs[0]
            nmap = outputs[1]
            # keep pre-refinement copies for the "prev" metrics below
            output_depth1 = output_depth.clone()
            nmap1 = nmap.clone()

            if args.train_cons:
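                # depth_cons refines depth and normals jointly: channel 0 of
                # its output is the refined depth, channels 1: the refined
                # normals (permuted back to NHWC)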
                outputs = depth_cons(output_depth, nmap.permute(0, 3, 1, 2))
                nmap = outputs[:, 1:].permute(0, 2, 3, 1)
                output_depth = outputs[:, 0].unsqueeze(1)

            mask = (tgt_depth <= args.nlabel * args.mindepth) & (
                tgt_depth >= args.mindepth) & (tgt_depth == tgt_depth)
            # For DeMoN testing, a fixed upper bound of 10 may be needed for a
            # fair comparison against DPSNet:
            # mask = (tgt_depth <= 10) & (tgt_depth >= args.mindepth) & (tgt_depth == tgt_depth)

            if not mask.any():
                continue

            output_depth1_ = torch.squeeze(output_depth1.data.cpu(), 1)
            output_depth_ = torch.squeeze(output_depth.data.cpu(), 1)

            errors_ = compute_errors_train(tgt_depth, output_depth_, mask)
            test_errors_ = list(
                compute_errors_test(tgt_depth[mask], output_depth_[mask]))
            test_errors1_ = list(
                compute_errors_test(tgt_depth[mask], output_depth1_[mask]))

            n_mask = (gt_nmap_var.permute(0, 2, 3, 1)[0, :, :] != 0)
            n_mask = n_mask[:, :, 0] | n_mask[:, :, 1] | n_mask[:, :, 2]
            total_angles_m = compute_angles(
                gt_nmap_var.permute(0, 2, 3, 1)[0], nmap[0])
            total_angles_m1 = compute_angles(
                gt_nmap_var.permute(0, 2, 3, 1)[0], nmap1[0])

            mask_angles = total_angles_m[n_mask]
            mask_angles1 = total_angles_m1[n_mask]
            total_angles_m[~n_mask] = 0
            total_angles_m1[~n_mask] = 0
            errors_.append(torch.mean(mask_angles).item())
            test_errors_.append(torch.mean(mask_angles).item())
            test_errors1_.append(torch.mean(mask_angles1).item())
            errors.update(errors_)
            test_errors.update(test_errors_)
            test_errors1.update(test_errors1_)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args.print_freq == 0 or i == len(val_loader) - 1:
                if args.train_cons:
                    print(
                        'valid: Time {} Prev Error {:.4f}({:.4f}) Curr Error {:.4f} ({:.4f}) Prev angle Error {:.4f} ({:.4f}) Curr angle Error {:.4f} ({:.4f}) Iter {}/{}'
                        .format(batch_time, test_errors1.val[0],
                                test_errors1.avg[0], test_errors.val[0],
                                test_errors.avg[0], test_errors1.val[-1],
                                test_errors1.avg[-1], test_errors.val[-1],
                                test_errors.avg[-1], i, len(val_loader)))
                else:
                    print(
                        'valid: Time {} Rel Error {:.4f} ({:.4f}) Angle Error {:.4f} ({:.4f}) Iter {}/{}'
                        .format(batch_time, test_errors.val[0],
                                test_errors.avg[0], test_errors.val[-1],
                                test_errors.avg[-1], i, len(val_loader)))
            if args.output_print:
                plt.imsave(output_dir / '{:04d}_map_dps.png'.format(i),
                           output_depth_.numpy()[0],
                           cmap='rainbow')
                np.save(output_dir / '{:04d}_dps.npy'.format(i),
                        output_depth_.numpy()[0])
                if args.train_cons:
                    plt.imsave(output_dir / '{:04d}_map_prev.png'.format(i),
                               output_depth1_.numpy()[0],
                               cmap='rainbow')
                    np.save(output_dir / '{:04d}_prev.npy'.format(i),
                            output_depth1_.numpy()[0])
                # np.save(output_dir/'{:04d}{}'.format(i,'_gt.npy'),tgt_depth.numpy()[0])
                # imsave(output_dir/'{:04d}_aimage{}'.format(i,'.png'), np.transpose(tgt_img.numpy()[0],(1,2,0)))
                # np.save(output_dir/'{:04d}_cam{}'.format(i,'.npy'),intrinsics_var.cpu().numpy()[0])
    if args.output_print:
        np.savetxt(output_dir / (args.ttype + 'errors.csv'),
                   test_errors.avg,
                   fmt='%1.4f',
                   delimiter=',')
        np.savetxt(output_dir / (args.ttype + 'prev_errors.csv'),
                   test_errors1.avg,
                   fmt='%1.4f',
                   delimiter=',')
    return errors.avg, error_names
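
# Note: compute_angles is project-specific. A minimal sketch, assuming it
# returns the per-pixel angle (in degrees) between two H x W x 3 normal maps;
# an illustration only:
import math

import torch


def compute_angles_sketch(gt_nmap, pred_nmap, eps=1e-8):
    # cosine of the angle between corresponding normals, clamped for acos
    dot = (gt_nmap * pred_nmap).sum(dim=-1)
    norms = gt_nmap.norm(dim=-1) * pred_nmap.norm(dim=-1)
    cos = (dot / (norms + eps)).clamp(-1.0, 1.0)
    return torch.acos(cos) * 180.0 / math.pi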
Example #4
def main():
    args = parser.parse_args()

    normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                            std=[0.5, 0.5, 0.5])
    valid_transform = custom_transforms.Compose(
        [custom_transforms.ArrayToTensor(), normalize])
    val_set = SequenceFolder(args.data,
                             transform=valid_transform,
                             seed=args.seed,
                             ttype=args.ttype,
                             dataset='',
                             sequence_length=args.sequence_length,
                             add_geo=args.geo,
                             depth_source=args.depth_init,
                             pose_source=('%s_poses.txt' % args.pose_init
                                          if args.pose_init else 'poses.txt'),
                             scale=False,
                             size=0,
                             req_gt=True,
                             get_path=True)

    print('{} samples found in {} valid scenes'.format(len(val_set),
                                                       len(val_set.scenes)))
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    depth_net = PSNet(args.nlabel, args.mindepth, add_geo_cost=args.geo).cuda()
    weights = torch.load(args.pretrained_dps)
    depth_net.load_state_dict(weights['state_dict'])
    depth_net.eval()

    output_dir = Path(args.output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    errors = np.zeros((2, 13, int(np.ceil(len(val_loader) / args.print_freq))),
                      np.float32)
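    # 13 metric slots per view; only the first 10 are filled below, so the
    # pose columns (ra, rd, ta) stay at zero in this script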
    with torch.no_grad():

        for ii, (tgt_img, ref_imgs, ref_poses, poses_gt, intrinsics,
                 intrinsics_inv, tgt_depth, ref_depths,
                 tgt_path) in enumerate(val_loader):
            if ii % args.print_freq == 0:
                i = int(ii / args.print_freq)
                tgt_img_var = tgt_img.cuda()
                ref_imgs_var = [img.cuda() for img in ref_imgs]
                ref_poses_var = [pose.cuda() for pose in ref_poses]
                poses_gt_var = [pose_gt.cuda() for pose_gt in poses_gt]

                ref_depths_var = [dep.cuda() for dep in ref_depths]
                intrinsics_var = intrinsics.cuda()
                intrinsics_inv_var = intrinsics_inv.cuda()

                # compute output
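                # stack the input relative poses; the GT poses are used only
                # to recover the metric scale below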
                pose = torch.cat(ref_poses_var, 1)
                poses_gt = torch.cat(poses_gt_var, 1)
                rel_pose = poses_gt.squeeze().data.cpu().numpy()

                # metric scale = norm of the GT relative translation; GT and
                # prediction are both divided by it for scale-aligned evaluation
                scale = float(np.linalg.norm(rel_pose[:3, 3]))

                start = time.time()
                output_depth = depth_net(tgt_img_var, ref_imgs_var, pose,
                                         intrinsics_var, intrinsics_inv_var,
                                         ref_depths_var)

                elps = time.time() - start
                tgt_disp = args.mindepth * args.nlabel / tgt_depth
                output_disp = args.mindepth * args.nlabel / output_depth

                # valid pixels: within [0.5, args.maxdepth] and not NaN; note
                # the hard-coded 0.5 lower bound rather than args.mindepth
                mask = (tgt_depth <= args.maxdepth) & (tgt_depth >= 0.5) & (
                    tgt_depth == tgt_depth)

                tgt_depth = tgt_depth / scale
                output_depth_scaled = output_depth / scale
                output_disp_ = torch.squeeze(output_disp.data.cpu(), 1)
                output_depth_ = torch.squeeze(output_depth_scaled.data.cpu(),
                                              1)
                if args.save:
                    output_depth_n = torch.squeeze(output_depth.data.cpu(),
                                                   1).numpy()[0]
                    save_path = tgt_path[0][:-4] + "_" + args.save + ".npy"
                    if not os.path.exists(save_path):
                        np.save(save_path, output_depth_n)
                errors[0, :10,
                       i] = compute_errors_test(tgt_depth[mask],
                                                output_depth_[mask])
                errors[1, :10,
                       i] = compute_errors_test(tgt_disp[mask],
                                                output_disp_[mask])

                print('iter{}, Elapsed Time {} Abs Error {:.10f}'.format(
                    i, elps, errors[0, 0, i]))

                if args.output_print:
                    output_disp_n = output_disp_.numpy()[0]
                    np.save(output_dir / '{:08d}.npy'.format(i), output_disp_n)
                    disp = (255 * tensor2array(torch.from_numpy(output_disp_n),
                                               max_value=args.nlabel,
                                               colormap='bone')).astype(np.uint8)
                    disp = disp.transpose(1, 2, 0)  # CHW -> HWC for imsave
                    imsave(output_dir / '{:08d}_disp.png'.format(i), disp)

    mean_errors = errors.mean(2)
    error_names = [
        'abs_rel', 'abs_diff', 'sq_rel', 'rms', 'log_rms', 'a1', 'a2', 'a3',
        'L1-inv', "sc-inv", 'ra', 'rd', 'ta'
    ]
    print("{}".format(args.output_dir))
    print("Depth & angle Results : ")
    print(
        "{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}"
        .format(*error_names))
    print(
        "{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, "
        "{:10.4f}, {:10.4f}, {:10.4f}".format(*mean_errors[0]))

    np.savetxt(output_dir / 'errors.csv',
               mean_errors,
               fmt='%1.4f',
               delimiter=',')