Example #1
# Imports needed to run this snippet; the project-local modules
# (utils, loss, models, dataset_loader) are assumed to follow the
# DeepMapping repository layout.
import os
import argparse

import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader

import utils
import loss
from models import DeepMapping2D
from dataset_loader import SimulatedPointCloud

# (the example omits the parser setup; opt carries name, init, data_dir,
# batch_size, loss, n_samples, lr, model and n_epochs)
opt = parser.parse_args()

# each experiment writes its checkpoints and options to its own folder
checkpoint_dir = os.path.join('../results/2D', opt.name)
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
utils.save_opt(checkpoint_dir, opt)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print('loading dataset')
# an optional initial pose estimate (e.g. from ICP) can warm-start training
if opt.init is not None:
    init_pose_np = np.load(opt.init)
    init_pose = torch.from_numpy(init_pose_np)
else:
    init_pose = None
dataset = SimulatedPointCloud(opt.data_dir, init_pose)
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)

# resolve the loss function by name from the loss module (avoids eval)
loss_fn = getattr(loss, opt.loss)

print('creating model')
model = DeepMapping2D(loss_fn=loss_fn, n_obs=dataset.n_obs,
                      n_samples=opt.n_samples).to(device)
optimizer = optim.Adam(model.parameters(), lr=opt.lr)

# optionally resume from a saved checkpoint
if opt.model is not None:
    utils.load_checkpoint(opt.model, model, optimizer)

print('start training')
for epoch in range(opt.n_epochs):

    training_loss = 0.0
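    # -- hedged sketch: the example is cut off here. A typical
    # DeepMapping-style epoch iterates the loader, treats the model's
    # forward pass as the loss, and accumulates it; the batch names
    # (obs_batch, valid_pt) are assumptions, not shown in the snippet.
    for obs_batch, valid_pt in loader:
        obs_batch = obs_batch.to(device)
        valid_pt = valid_pt.to(device)
        batch_loss = model(obs_batch, valid_pt)  # forward pass returns the loss
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        training_loss += batch_loss.item()
    print('[{}/{}] training loss: {:.4f}'.format(
        epoch + 1, opt.n_epochs, training_loss / len(loader)))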
Example #2
# imports as in Example #1, plus: import torch.nn as nn
# (checkpoint_dir is defined earlier in the original script, as in Example #1)
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
utils.save_opt(checkpoint_dir, opt)
# pin the job to GPU 1; this must run before CUDA is first initialized
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print('loading dataset')
if opt.init is not None:
    init_pose_np = np.load(opt.init)
    init_pose = torch.from_numpy(init_pose_np)
else:
    init_pose = None

# one latent code per local scene instance
instance_num = 256
latent_size = opt.lat_size
dataset = SimulatedPointCloud(opt.data_dir, instance_num, init_pose)
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)

# resolve the loss function by name from the loss module (avoids eval)
loss_fn = getattr(loss, opt.loss)

print('creating model')
model = DeepMapping2D(n_lat=latent_size,
                      loss_fn=loss_fn,
                      n_obs=dataset.n_obs,
                      n_samples=opt.n_samples).to(device)

# one trainable latent vector per instance, initialised from N(0, 0.9);
# requires_grad=True is already the default for nn.Parameter
latent_vecs = []
for i in range(instance_num):
    vec = nn.Parameter(torch.ones(latent_size).normal_(0, 0.9).to(device))
    latent_vecs.append(vec)
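# -- hedged sketch: the example stops before the optimizer is built.
# A natural next step is to optimize the latent codes jointly with the
# network via parameter groups (Example #4 below begins constructing
# exactly this kind of optimizer); the grouping here, and the reuse of
# opt.lr for both groups, are assumptions.
optimizer = optim.Adam([
    {"params": model.parameters(), "lr": opt.lr},
    {"params": latent_vecs, "lr": opt.lr},
])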
Example #3
# imports as in Example #1 (os, argparse, numpy, utils, SimulatedPointCloud)
parser = argparse.ArgumentParser()
# (earlier arguments, including --name, are omitted by the example; the
# head of the flag below is reconstructed from its visible tail and from
# the use of opt.metric further down, so '-m' and type=str are assumptions)
parser.add_argument('-m',
                    '--metric',
                    type=str,
                    default='point',
                    choices=['point', 'plane'],
                    help='minimization metric')
parser.add_argument('-d',
                    '--data_dir',
                    type=str,
                    default='../data/2D/',
                    help='dataset path')
opt = parser.parse_args()

checkpoint_dir = os.path.join('../results/2D', opt.name)
if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
utils.save_opt(checkpoint_dir, opt)

dataset = SimulatedPointCloud(opt.data_dir)
n_pc = len(dataset)

# one (x, y, theta) pose per point cloud; the first scan stays at the origin
pose_est = np.zeros((n_pc, 3), dtype=np.float32)
print('running icp')
for idx in range(n_pc - 1):
    # register each scan (src) against its predecessor (dst)
    dst, valid_dst = dataset[idx]
    src, valid_src = dataset[idx + 1]

    # keep only the valid points and convert to numpy for ICP
    dst = dst[valid_dst, :].numpy()
    src = src[valid_src, :].numpy()

    _, R0, t0 = utils.icp(src, dst, metrics=opt.metric)
    if idx == 0:
        R_cum = R0
        t_cum = t0
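    # -- hedged completion: the example is cut off after the base case.
    # One standard way to chain the pairwise estimates into absolute
    # poses (the original script's exact accumulation is not shown):
    else:
        t_cum = np.matmul(R_cum, t0) + t_cum
        R_cum = np.matmul(R_cum, R0)

    pose_est[idx + 1, :2] = t_cum.reshape(-1)
    pose_est[idx + 1, 2] = np.arctan2(R_cum[1, 0], R_cum[0, 0])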
Example #4
# imports as in Examples #1 and #2
# (the example begins inside this else branch; the surrounding check is
# reconstructed from the identical pattern in Examples #1 and #2)
if opt.init is not None:
    init_pose = torch.from_numpy(np.load(opt.init))
else:
    init_pose = None

# one trainable latent vector per instance, initialised from N(0, 0.8)
latent_vecs = []

latent_size = opt.latent_size
print("latent_size", latent_size)
instances_per_scene = 256
for i in range(instances_per_scene):
    vec = torch.ones(latent_size).normal_(0, 0.8).to(device)
    vec.requires_grad = True
    latent_vecs.append(vec)

# two extra latent vectors; note they are not marked requires_grad here,
# so as written the optimizer would not update them
w = torch.ones(latent_size).normal_(0, 0.8).to(device)
w_r = torch.ones(latent_size).normal_(0, 0.8).to(device)

dataset = SimulatedPointCloud(opt.data_dir, instances_per_scene, init_pose)
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)

# resolve the loss function by name from the loss module (avoids eval)
loss_fn = getattr(loss, opt.loss)

print('creating model')
# when latent codes are combined by concatenation, the effective latent
# size seen by the model doubles
if opt.op == 'cat':
    latent_size *= 2
model = DeepMapping2D(loss_fn=loss_fn,
                      n_obs=dataset.n_obs,
                      latent_size=latent_size,
                      n_samples=opt.n_samples).to(device)
optimizer = optim.Adam([{
    "params": model.parameters(),
    "lr": opt.lr,
}, {
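    # -- hedged completion: the example is cut off inside this second
    # parameter group. A plausible continuation gives the latent codes
    # their own entry (w and w_r would also need requires_grad to be
    # trained); the exact grouping and learning rate are assumptions.
    "params": latent_vecs,
    "lr": opt.lr,
}])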