Example #1
import torch

log_step = args.log_step        # log every `log_step` batches
saving_step = args.saving_step  # save a checkpoint every `saving_step` batches
curr_loss = 0

# To GPU
if use_cuda:
    print('Using GPU')
    model_gcn.cuda()
    # Move any tensors in the optimizer state (e.g. momentum buffers) to the GPU too
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.cuda()
else:
    print('Using CPU')

print("nb trainable param", model_gcn.get_nb_trainable_params())

# Train
for epoch in range(1, nb_epochs + 1):
    for n, data in enumerate(train_loader):

        # Unpack one batch: input image plus ground-truth points and normals
        im, gt_points, gt_normals = data
        if use_cuda:
            im = im.cuda()
            gt_points = gt_points.cuda()
            gt_normals = gt_normals.cuda()

        # Forward
        graph.reset()                # reset the mesh graph state for this batch
        optimizer.zero_grad()
        pool = FeaturePooling(im)    # pool image features for the graph network
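
The first example is truncated here. Below is a minimal sketch of how such a batch step typically continues; the forward-call signature of model_gcn, the loss_fn helper, and the checkpoint path are hypothetical placeholders, not the original code:

        # Hypothetical continuation of the batch loop (assumed signatures):
        pred_points = model_gcn(graph, pool)                  # assumed forward call
        loss = loss_fn(pred_points, gt_points, gt_normals)    # assumed loss helper
        loss.backward()
        optimizer.step()
        curr_loss += loss.item()

        # Periodic logging and checkpointing, using the log_step / saving_step
        # values read from args above
        if n % log_step == 0:
            print('epoch', epoch, 'batch', n, 'loss', curr_loss / log_step)
            curr_loss = 0
        if n % saving_step == 0:
            torch.save(model_gcn.state_dict(), 'checkpoint.pth')  # hypothetical path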

Example #2

import numpy as np
import torch

log_step = args.log_step        # log every `log_step` batches
saving_step = args.saving_step  # save a checkpoint every `saving_step` batches
curr_loss = 0

# To GPU
if use_cuda:
    print('Using GPU', flush=True)
    model_gcn.cuda()
    # Move optimizer state tensors to the GPU as well
    for state in optimizer.state.values():
        for k, v in state.items():
            if torch.is_tensor(v):
                state[k] = v.cuda()
else:
    print('Using CPU', flush=True)

print("nb trainable param", model_gcn.get_nb_trainable_params(), flush=True)

model_gcn.train()  # enable training mode (dropout, batch-norm updates)
# Train
for epoch in range(1, nb_epochs + 1):
    for n, data in enumerate(train_loader):
        # Each batch carries m views per sample: images and their camera viewpoints
        ims, viewpoints, gt_points, gt_normals = data
        # Reorder to view-major layout: (views, batch, h, w, c) and (views, batch, dim)
        ims = np.transpose(ims, (1, 0, 2, 3, 4))
        viewpoints = np.transpose(viewpoints, (1, 0, 2))
        m, b, h, w, c = ims.shape

        # For each view, broadcast the flattened viewpoint over the image plane
        # and append it to the image as extra channels
        t_ims = []
        for i in range(m):
            v = viewpoints[i].flatten()
            v = np.tile(v, (b, h, w, 1))
            im = np.concatenate((ims[i], v), axis=3)
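
The second example likewise stops mid-loop. A plausible continuation, assuming each per-view array is collected and converted to a channel-first torch tensor before the forward pass (all of the following is an assumption, not the original code):

            t_ims.append(im)

        # Hypothetical continuation: convert each (b, h, w, c+v) view array to a
        # channel-first float tensor and move data to the GPU when available
        t_ims = [torch.as_tensor(t, dtype=torch.float32).permute(0, 3, 1, 2)
                 for t in t_ims]
        if use_cuda:
            t_ims = [t.cuda() for t in t_ims]
            gt_points = gt_points.cuda()
            gt_normals = gt_normals.cuda()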