Example #1
import time

import open3d as o3d
import torch
from tqdm import tqdm

# IterativeBenchmark, icp, npy2pcd, compute_metrics and summary_metrics are
# project-local helpers assumed to come from the surrounding repo.


def evaluate_benchmark_icp(args, test_loader):
    """Evaluate the learned iterative benchmark with a classical ICP refinement step."""
    in_dim = 6 if args.normal else 3
    model = IterativeBenchmark(in_dim=in_dim, niters=args.niters, gn=args.gn)
    if args.cuda:
        model = model.cuda()
        model.load_state_dict(torch.load(args.checkpoint))
    else:
        model.load_state_dict(
            torch.load(args.checkpoint, map_location=torch.device('cpu')))
    model.eval()

    dura = []
    r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic = [], [], [], [], [], []
    with torch.no_grad():
        for i, (ref_cloud, src_cloud, gtR,
                gtt) in tqdm(enumerate(test_loader)):
            if args.cuda:
                ref_cloud, src_cloud, gtR, gtt = ref_cloud.cuda(), src_cloud.cuda(), \
                                                 gtR.cuda(), gtt.cuda()
            tic = time.time()
            R1, t1, pred_ref_cloud = model(
                src_cloud.permute(0, 2, 1).contiguous(),
                ref_cloud.permute(0, 2, 1).contiguous())
            ref_cloud = torch.squeeze(ref_cloud).cpu().numpy()
            src_cloud_tmp = torch.squeeze(pred_ref_cloud[-1]).cpu().numpy()
            R2, t2, pred_ref_cloud = icp(npy2pcd(src_cloud_tmp),
                                         npy2pcd(ref_cloud))
            R2, t2 = torch.from_numpy(R2)[None, ...].to(R1), \
                     torch.from_numpy(t2)[None, ...].to(R1)
            R, t = R2 @ R1, torch.squeeze(R2 @ t1[:, :, None], dim=-1) + t2
            toc = time.time()
            dura.append(toc - tic)
            cur_r_mse, cur_r_mae, cur_t_mse, cur_t_mae, cur_r_isotropic, \
            cur_t_isotropic = compute_metrics(R, t, gtR, gtt)
            r_mse.append(cur_r_mse)
            r_mae.append(cur_r_mae)
            t_mse.append(cur_t_mse)
            t_mae.append(cur_t_mae)
            r_isotropic.append(cur_r_isotropic.cpu().detach().numpy())
            t_isotropic.append(cur_t_isotropic.cpu().detach().numpy())

            if args.show:
                src_cloud = torch.squeeze(src_cloud).cpu().numpy()
                pcd1 = npy2pcd(ref_cloud, 0)
                pcd2 = npy2pcd(src_cloud, 1)
                pcd3 = pred_ref_cloud
                o3d.visualization.draw_geometries([pcd1, pcd2, pcd3])

    r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic = \
        summary_metrics(r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic)

    return dura, r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic
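
# A hedged sketch of the icp(...) helper called above. The project's actual
# implementation is not shown here; this version assumes Open3D's
# point-to-point ICP and the (R, t, aligned_cloud) return convention the
# caller relies on. The max_dist default is hypothetical.
import copy

import numpy as np
import open3d as o3d


def icp(source, target, max_dist=0.1):
    """Refine the alignment of `source` onto `target` with classical ICP."""
    result = o3d.pipelines.registration.registration_icp(
        source, target, max_dist, np.eye(4),
        o3d.pipelines.registration.TransformationEstimationPointToPoint())
    T = result.transformation  # 4x4 homogeneous transform
    # copies are writable, which torch.from_numpy in the caller requires
    R, t = T[:3, :3].copy(), T[:3, 3].copy()
    aligned = copy.deepcopy(source)
    aligned.transform(T)
    return R, t, aligned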
Example #2
def evaluate_benchmark(args, test_loader):
    """Evaluate the learned iterative benchmark on the test set (no ICP refinement)."""
    model = IterativeBenchmark(in_dim=args.in_dim,
                               niters=args.niters,
                               gn=args.gn)
    if args.cuda:
        model = model.cuda()
        model.load_state_dict(torch.load(args.checkpoint))
    else:
        model.load_state_dict(torch.load(args.checkpoint, map_location=torch.device('cpu')))
    model.eval()

    dura = []
    r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic = [], [], [], [], [], []
    with torch.no_grad():
        for i, (ref_cloud, src_cloud, gtR, gtt) in tqdm(enumerate(test_loader)):
            if args.cuda:
                ref_cloud, src_cloud, gtR, gtt = ref_cloud.cuda(), src_cloud.cuda(), \
                                                 gtR.cuda(), gtt.cuda()
            tic = time.time()
            R, t, pred_ref_cloud = model(src_cloud.permute(0, 2, 1).contiguous(),
                    ref_cloud.permute(0, 2, 1).contiguous())
            toc = time.time()
            dura.append(toc - tic)
            cur_r_mse, cur_r_mae, cur_t_mse, cur_t_mae, cur_r_isotropic, \
            cur_t_isotropic = compute_metrics(R, t, gtR, gtt)
            r_mse.append(cur_r_mse)
            r_mae.append(cur_r_mae)
            t_mse.append(cur_t_mse)
            t_mae.append(cur_t_mae)
            r_isotropic.append(cur_r_isotropic.cpu().detach().numpy())
            t_isotropic.append(cur_t_isotropic.cpu().detach().numpy())

            if args.show:
                ref_cloud = torch.squeeze(ref_cloud).cpu().numpy()
                src_cloud = torch.squeeze(src_cloud).cpu().numpy()
                pred_ref_cloud = torch.squeeze(pred_ref_cloud[-1]).cpu().numpy()
                pcd1 = npy2pcd(ref_cloud, 0)
                pcd2 = npy2pcd(src_cloud, 1)
                pcd3 = npy2pcd(pred_ref_cloud, 2)
                o3d.visualization.draw_geometries([pcd1, pcd2, pcd3])

    r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic = \
        summary_metrics(r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic)

    return dura, r_mse, r_mae, t_mse, t_mae, r_isotropic, t_isotropic
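
# A hedged sketch of the npy2pcd(points, ind) helper used for visualization
# above; the fixed red/green/blue palette is an assumption, not the project's
# confirmed color mapping.
import open3d as o3d


def npy2pcd(points, ind=-1):
    """Wrap an (N, 3+) numpy array into an Open3D point cloud, optionally colored."""
    colors = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]  # ref / src / predicted
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(points[:, :3])
    if 0 <= ind < len(colors):
        pcd.paint_uniform_color(colors[ind])
    return pcd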
Example #3
def evaluate_benchmark(args, test_loader):
    """Evaluate the iterative benchmark, reporting mean translation, rotation and degree errors."""
    model = IterativeBenchmark(in_dim1=args.in_dim, niters=args.niters)
    if args.cuda:
        model = model.cuda()
        model.load_state_dict(torch.load(args.checkpoint))
    else:
        model.load_state_dict(
            torch.load(args.checkpoint, map_location=torch.device('cpu')))
    model.eval()

    dura = []
    t_errors, R_errors, degree_errors = [], [], []
    with torch.no_grad():
        for i, (ref_cloud, src_cloud, gtR,
                gtt) in tqdm(enumerate(test_loader)):
            if args.cuda:
                ref_cloud, src_cloud, gtR, gtt = ref_cloud.cuda(), src_cloud.cuda(), \
                                                 gtR.cuda(), gtt.cuda()
            tic = time.time()
            R, t, pred_ref_cloud = model(
                src_cloud.permute(0, 2, 1).contiguous(),
                ref_cloud.permute(0, 2, 1).contiguous())
            toc = time.time()
            dura.append(toc - tic)

            cur_t_error = translation_error(t, -gtt)
            cur_R_error = rotation_error(R, gtR.permute(0, 2, 1).contiguous())
            cur_degree_error = degree_error(R,
                                            gtR.permute(0, 2, 1).contiguous())
            t_errors.append(cur_t_error.item())
            R_errors.append(cur_R_error.item())
            degree_errors.append(cur_degree_error.item())

            if args.show:
                print(cur_t_error.item(), cur_R_error.item(),
                      cur_degree_error.item())
                ref_cloud = torch.squeeze(ref_cloud).cpu().numpy()
                src_cloud = torch.squeeze(src_cloud).cpu().numpy()
                pred_ref_cloud = torch.squeeze(pred_ref_cloud).cpu().numpy()
                pcd1 = npy2pcd(ref_cloud, 0)
                pcd2 = npy2pcd(src_cloud, 1)
                pcd3 = npy2pcd(pred_ref_cloud, 2)
                o3d.visualization.draw_geometries([pcd1, pcd2, pcd3])
    return dura, np.mean(t_errors), np.mean(R_errors), np.mean(degree_errors)
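
# Hedged sketches of the error metrics called above. The formulas follow
# common registration conventions (L2 translation gap, Frobenius rotation
# residual, geodesic angle in degrees); the project's exact definitions may differ.
import torch


def translation_error(t, gt_t):
    # mean Euclidean distance between predicted and ground-truth translations
    return torch.mean(torch.norm(t - gt_t, dim=-1))


def rotation_error(R, gt_R):
    # mean Frobenius norm of the rotation residual
    return torch.mean(torch.norm(R - gt_R, dim=(1, 2)))


def degree_error(R, gt_R):
    # mean geodesic angle between predicted and ground-truth rotations, in degrees
    trace = torch.einsum('bii->b', R @ gt_R.transpose(1, 2))
    cos = torch.clamp((trace - 1.0) / 2.0, -1.0, 1.0)
    return torch.mean(torch.rad2deg(torch.acos(cos)))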
Example #4
def main():
    """Train IterativeBenchmark on ModelNet40 and checkpoint each new best test metric."""
    args = config_params()
    print(args)

    setup_seed(args.seed)
    os.makedirs(args.saved_path, exist_ok=True)
    summary_path = os.path.join(args.saved_path, 'summary')
    os.makedirs(summary_path, exist_ok=True)
    checkpoints_path = os.path.join(args.saved_path, 'checkpoints')
    os.makedirs(checkpoints_path, exist_ok=True)

    train_set = ModelNet40(root=args.root,
                           npts=args.train_npts,
                           train=True,
                           normal=args.normal,
                           mode=args.mode)
    test_set = ModelNet40(root=args.root,
                          npts=args.train_npts,
                          train=False,
                          normal=args.normal,
                          mode=args.mode)
    train_loader = DataLoader(train_set,
                              batch_size=args.batchsize,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_set,
                             batch_size=args.batchsize,
                             shuffle=False,
                             num_workers=args.num_workers)

    in_dim = 6 if args.normal else 3
    model = IterativeBenchmark(in_dim=in_dim, niters=args.niters, gn=args.gn)
    model = model.cuda()
    loss_fn = EMDLosspy().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.gamma, last_epoch=-1)

    writer = SummaryWriter(summary_path)

    test_min_loss, test_min_r_mse_error, test_min_rot_error = \
        float('inf'), float('inf'), float('inf')
    for epoch in range(args.epoches):
        print('=' * 20, epoch + 1, '=' * 20)
        train_results = train_one_epoch(train_loader, model, loss_fn,
                                        optimizer)
        print_train_info(train_results)
        test_results = test_one_epoch(test_loader, model, loss_fn)
        print_train_info(test_results)

        if epoch % args.saved_frequency == 0:
            writer.add_scalar('Loss/train', train_results['loss'], epoch + 1)
            writer.add_scalar('Loss/test', test_results['loss'], epoch + 1)
            writer.add_scalar('RError/train', train_results['r_mse'],
                              epoch + 1)
            writer.add_scalar('RError/test', test_results['r_mse'], epoch + 1)
            writer.add_scalar('rotError/train', train_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('rotError/test', test_results['r_isotropic'],
                              epoch + 1)
            writer.add_scalar('Lr', optimizer.param_groups[0]['lr'], epoch + 1)
        test_loss, test_r_error, test_rot_error = \
            test_results['loss'], test_results['r_mse'], test_results['r_isotropic']
        if test_loss < test_min_loss:
            saved_path = os.path.join(checkpoints_path, "test_min_loss.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_loss = test_loss
        if test_r_error < test_min_r_mse_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rmse_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_r_mse_error = test_r_error
        if test_rot_error < test_min_rot_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_rot_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_rot_error = test_rot_error
        scheduler.step()
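
# How a checkpoint written by main() might be restored for evaluation; a
# minimal sketch, assuming IterativeBenchmark is constructed with the same
# arguments used at training time. The path, niters and gn values below are
# hypothetical.
import torch

checkpoint = 'work_dirs/checkpoints/test_min_loss.pth'  # hypothetical path
model = IterativeBenchmark(in_dim=3, niters=8, gn=False)
model.load_state_dict(torch.load(checkpoint, map_location=torch.device('cpu')))
model.eval()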
Example #5
def main():
    """Train IterativeBenchmark on ModelNet40, checkpointing per-metric test minima."""
    args = config_params()
    print(args)

    setup_seed(args.seed)
    os.makedirs(args.saved_path, exist_ok=True)
    summary_path = os.path.join(args.saved_path, 'summary')
    os.makedirs(summary_path, exist_ok=True)
    checkpoints_path = os.path.join(args.saved_path, 'checkpoints')
    os.makedirs(checkpoints_path, exist_ok=True)

    train_set = ModelNet40(args.root, args.train_npts)
    test_set = ModelNet40(args.root, args.train_npts, False)
    train_loader = DataLoader(train_set,
                              batch_size=args.batchsize,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(test_set,
                             batch_size=args.batchsize,
                             shuffle=False,
                             num_workers=args.num_workers)

    model = IterativeBenchmark(in_dim1=args.in_dim, niters=args.niters)
    model = model.cuda()
    loss_fn = EMDLosspy().cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=args.milestones, gamma=args.gamma, last_epoch=-1)

    writer = SummaryWriter(summary_path)

    test_min_loss, test_min_t_error, test_min_R_error, test_min_degree_error = \
        float('inf'), float('inf'), float('inf'), float('inf')
    for epoch in range(args.epoches):
        print('=' * 20, epoch + 1, '=' * 20)
        train_loss, train_t_error, train_R_error, train_degree_error = \
            train_one_epoch(train_loader, model, loss_fn, optimizer)
        print(
            'Train: loss: {:.4f}, t_error: {:.4f}, R_error: {:.4f}, degree_error: {:.4f}'
            .format(train_loss, train_t_error, train_R_error,
                    train_degree_error))
        test_loss, test_t_error, test_R_error, test_degree_error = \
            test_one_epoch(test_loader, model, loss_fn)
        print(
            'Test: loss: {:.4f}, t_error: {:.4f}, R_error: {:.4f}, degree_error: {:.4f}'
            .format(test_loss, test_t_error, test_R_error, test_degree_error))

        if epoch % args.saved_frequency == 0:
            writer.add_scalar('Loss/train', train_loss, epoch + 1)
            writer.add_scalar('Loss/test', test_loss, epoch + 1)
            writer.add_scalar('TError/train', train_t_error, epoch + 1)
            writer.add_scalar('TError/test', test_t_error, epoch + 1)
            writer.add_scalar('RError/train', train_R_error, epoch + 1)
            writer.add_scalar('RError/test', test_R_error, epoch + 1)
            writer.add_scalar('degreeError/train', train_degree_error,
                              epoch + 1)
            writer.add_scalar('degreeError/test', test_degree_error, epoch + 1)
            writer.add_scalar('Lr', optimizer.param_groups[0]['lr'], epoch + 1)
        if test_loss < test_min_loss:
            saved_path = os.path.join(checkpoints_path, "test_min_loss.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_loss = test_loss
        if test_t_error < test_min_t_error:
            saved_path = os.path.join(checkpoints_path, "test_min_t_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_t_error = test_t_error
        if test_R_error < test_min_R_error:
            saved_path = os.path.join(checkpoints_path, "test_min_R_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_R_error = test_R_error
        if test_degree_error < test_min_degree_error:
            saved_path = os.path.join(checkpoints_path,
                                      "test_min_degree_error.pth")
            torch.save(model.state_dict(), saved_path)
            test_min_degree_error = test_degree_error
        scheduler.step()
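
# A plausible setup_seed(...) implementation matching the call in main();
# this is a common reproducibility pattern and an assumption about the
# project's actual helper.
import random

import numpy as np
import torch


def setup_seed(seed):
    """Seed Python, NumPy and PyTorch RNGs and make cuDNN deterministic."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True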