Example #1
0
    # NOTE(review): this fragment begins mid-iteration of an enclosing
    # evaluation loop that is not visible here — cor_img, edg_img, gt, k,
    # args and the test_*_losses accumulators come from context not shown.

    # Initial corner estimate extracted from the predicted corner map.
    cor_id = get_ini_cor(cor_img, args.d1, args.d2)

    # Gradient descent optimization
    # Optionally refine the corner estimate against the edge/corner maps.
    if args.post_optimization:
        cor_id = optimize_cor_id(cor_id,
                                 edg_img,
                                 cor_img,
                                 num_iters=100,
                                 verbose=False)

    # Compute normalized corner error
    # Per-corner Euclidean distance, normalized by the image diagonal.
    cor_error = ((gt - cor_id)**2).sum(1)**0.5
    cor_error /= np.sqrt(cor_img.shape[0]**2 + cor_img.shape[1]**2)
    # Rows 0::2 / 1::2 interleave two corner groups (presumably
    # ceiling/floor pairs — TODO confirm against the label file format).
    pe_error = eval_PE(cor_id[0::2], cor_id[1::2], gt[0::2], gt[1::2])
    iou3d = eval_3diou(cor_id[1::2], cor_id[0::2], gt[1::2], gt[0::2])
    test_losses.update('CE(%)', cor_error.mean() * 100)
    test_losses.update('PE(%)', pe_error * 100)
    test_losses.update('3DIoU', iou3d)

    # Bucket metrics per dataset: keys prefixed 'pano' go to the
    # PanoContext split, everything else to the Stanford 2D-3D split.
    if k.startswith('pano'):
        test_pano_losses.update('CE(%)', cor_error.mean() * 100)
        test_pano_losses.update('PE(%)', pe_error * 100)
        test_pano_losses.update('3DIoU', iou3d)
    else:
        test_2d3d_losses.update('CE(%)', cor_error.mean() * 100)
        test_2d3d_losses.update('PE(%)', pe_error * 100)
        test_2d3d_losses.update('3DIoU', iou3d)

# Final summaries printed once the (unseen) loop completes.
print('[RESULT overall     ] %s' % (test_losses), flush=True)
print('[RESULT panocontext ] %s' % (test_pano_losses), flush=True)
print('[RESULT stanford2d3d] %s' % (test_2d3d_losses), flush=True)
        # NOTE(review): this fragment begins mid `with torch.no_grad()`
        # block of an enclosing loop — x_augmented, aug_type, datas, the
        # networks, args and test_losses come from context not shown.
        x_augmented = torch.FloatTensor(x_augmented).to(device)
        # Encoder features feed both decoders; the corner decoder also
        # consumes the intermediate edge-decoder outputs.
        en_list = encoder(x_augmented)
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        # NCHW -> NHWC, average over the augmented copies, then take the
        # single remaining channel.
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)[..., 0]

    # Load ground truth corner label
    # Key is the sample filename minus its 4-character extension.
    k = datas[-1][:-4]
    path = os.path.join(args.root_dir, 'label_cor', '%s.txt' % k)
    if not os.path.isfile(path):
        # Best-effort: samples without a label file are skipped, not fatal.
        print('Skip', path)
        continue
    with open(path) as f:
        gt = np.array([line.strip().split() for line in f], np.float64)

    # Construct corner label from predicted corner map
    cor_id = get_ini_cor(cor_img, args.d1, args.d2)

    # Compute normalized corner error
    # Per-corner Euclidean distance, normalized by the image diagonal.
    cor_error = ((gt - cor_id)**2).sum(1)**0.5
    cor_error /= np.sqrt(cor_img.shape[0]**2 + cor_img.shape[1]**2)
    test_losses.update('Corner error', cor_error.mean())

# Final summary printed once the (unseen) loop completes.
print('[RESULT] %s' % (test_losses), flush=True)
Example #3
0
# Evaluate the held-out set: accumulate batch-weighted edge/corner losses.
test_losses = StatisticDict()
for batch_idx, batch in enumerate(loader):
    print('processed %d batches out of %d' % (batch_idx, len(loader)), end='\r', flush=True)
    with torch.no_grad():
        # Assemble the network input by concatenating the requested channels.
        channels = [batch[i] for i in range(len(args.input_cat))]
        inputs = torch.cat(channels, dim=1).to(device)
        edge_gt = batch[-2].to(device)
        corner_gt = batch[-1].to(device)
        batch_size = inputs.size(0)

        # Forward pass: encoder features feed both decoders; the corner
        # decoder also consumes intermediate edge-decoder features.
        feats = encoder(inputs)
        edge_feats = edg_decoder(feats[::-1])
        corner_feats = cor_decoder(feats[-1:] + edge_feats[:-1])
        edge_pred = edge_feats[-1]
        corner_pred = corner_feats[-1]

        # Element-wise loss, down-weighting zero-target locations by 0.2
        # before reducing to a scalar.
        edge_loss = criti(edge_pred, edge_gt)
        edge_loss[edge_gt == 0.] *= 0.2
        edge_loss = edge_loss.mean().item()
        corner_loss = criti(corner_pred, corner_gt)
        corner_loss[corner_gt == 0.] *= 0.2
        corner_loss = corner_loss.mean().item()

    # Weight each batch's contribution by its size.
    test_losses.update('edg loss', edge_loss, weight=batch_size)
    test_losses.update('cor loss', corner_loss, weight=batch_size)

print('[RESULT] %s' % (test_losses), flush=True)
Example #4
0
        # NOTE(review): this fragment begins mid training step — loss_edg,
        # loss_cor, y_cor, optimizer, the networks, args and train_losses
        # come from context not shown.
        # Down-weight loss at zero-target locations by 0.2.
        loss_cor[y_cor == 0.] *= 0.2
        loss_cor = loss_cor.mean()
        loss = loss_edg + loss_cor

        # backprop
        optimizer.zero_grad()
        loss.backward()
        # Clip by max-abs gradient value (inf norm) at 3.0 across all
        # three sub-networks jointly.
        nn.utils.clip_grad_norm_(chain(encoder.parameters(),
                                       edg_decoder.parameters(),
                                       cor_decoder.parameters()),
                                 3.0,
                                 norm_type='inf')
        optimizer.step()

        # Statitical result
        train_losses.update('edg loss', loss_edg.item())
        train_losses.update('cor loss', loss_cor.item())
        if args.cur_iter % args.disp_iter == 0:
            print('iter %d (epoch %d) | lr %.6f | %s' %
                  (args.cur_iter, ith_epoch, args.running_lr, train_losses),
                  flush=True)

    # Dump model
    # Periodically checkpoint each sub-network's weights.
    if ith_epoch % args.save_every == 0:
        torch.save(
            encoder.state_dict(),
            os.path.join(args.ckpt, args.id,
                         'epoch_%d_encoder.pth' % ith_epoch))
        # NOTE(review): SOURCE is truncated here — this second torch.save
        # call is cut off mid-expression.
        torch.save(
            edg_decoder.state_dict(),
            os.path.join(args.ckpt, args.id,
    # Load ground truth corner label
    # NOTE(review): this fragment begins mid-iteration of an enclosing
    # evaluation loop — datas, cor_img, args and the *_losses accumulators
    # come from context not shown.
    # Key is the sample filename minus its 4-character extension.
    k = datas[-1][:-4]
    path = os.path.join(args.root_dir, 'label_cor', '%s.txt' % k)
    with open(path) as f:
        gt = np.array([line.strip().split() for line in f], np.float64)

    # Construct corner label from predicted corner map
    cor_id = get_ini_cor(cor_img, args.d1, args.d2)

    # Compute normalized corner error
    # Per-corner Euclidean distance, normalized by the image diagonal;
    # X/Y error are the mean absolute errors along each coordinate axis.
    cor_error = ((gt - cor_id)**2).sum(1)**0.5
    cor_error /= np.sqrt(cor_img.shape[0]**2 + cor_img.shape[1]**2)
    x_error = np.abs(gt[:, 0] - cor_id[:, 0]).mean()
    y_error = np.abs(gt[:, 1] - cor_id[:, 1]).mean()
    test_losses.update('Corner error', cor_error.mean())
    test_losses.update('X error', x_error.mean())
    test_losses.update('Y error', y_error.mean())

    # Bucket metrics per dataset: keys prefixed 'pano' go to the
    # PanoContext split, everything else to the Stanford 2D-3D split.
    if k.startswith('pano'):
        test_pano_losses.update('Corner error', cor_error.mean())
        test_pano_losses.update('X error', x_error.mean())
        test_pano_losses.update('Y error', y_error.mean())
    else:
        test_2d3d_losses.update('Corner error', cor_error.mean())
        test_2d3d_losses.update('X error', x_error.mean())
        test_2d3d_losses.update('Y error', y_error.mean())

# Final summaries printed once the (unseen) loop completes.
print('[RESULT overall     ] %s' % (test_losses), flush=True)
print('[RESULT panocontext ] %s' % (test_pano_losses), flush=True)
print('[RESULT stanford2d3d] %s' % (test_2d3d_losses), flush=True)