Example #1
0
        edg_de_list = edg_decoder(en_list[::-1])
        cor_de_list = cor_decoder(en_list[-1:] + edg_de_list[:-1])

        edg_tensor = torch.sigmoid(edg_de_list[-1])
        cor_tensor = torch.sigmoid(cor_de_list[-1])

        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

    # Merge all results from augmentation
    edgmap = edg_img.transpose([0, 2, 3, 1]).mean(0).copy()
    cormap = cor_img.transpose([0, 2, 3, 1]).mean(0)[..., 0].copy()

    # Post processing to extract layout
    cor_id = get_ini_cor(cormap, args.d1, args.d2)
    if args.post_optimization:
        cor_id = optimize_cor_id(cor_id,
                                 edgmap,
                                 cormap,
                                 num_iters=100,
                                 verbose=False)

    # Draw extracted layout on source image
    bon_img = draw_boundary_from_cor_id(cor_id.copy(), i_img * 255)

    # Composite all result in one image
    all_in_one = 0.3 * edgmap + 0.3 * cormap[..., None] + 0.4 * i_img
    all_in_one = draw_boundary_from_cor_id(cor_id.copy(), all_in_one * 255)

    # Dump results
Example #2
0
def visual_persp(img_glob,
                 output_dir,
                 device=torch.device("cpu"),
                 path_prefix='ckpt/pre',
                 flip=True,
                 rotate=None,
                 d1=21,
                 d2=3):
    """Run a pretrained LayoutNet on perspective images and dump results.

    For every image matched by ``img_glob``: load the Torch7-pretrained
    LayoutNet weights, run the (optionally flip/rotate augmented) image
    through the network, undo and average the augmented predictions, extract
    corner ids from the corner map, and write the edge map, corner map,
    boundary overlay, composite image and corner coordinates to
    ``output_dir``.

    Parameters
    ----------
    img_glob : str
        Glob pattern selecting input image files.
    output_dir : str
        Existing directory where the result files are written.
    device : torch.device
        Device for the model and input tensors.
    path_prefix : str
        NOTE(review): currently unused — the checkpoint paths below are
        hard-coded; consider wiring this parameter in.
    flip : bool
        Add a horizontally flipped copy for test-time augmentation.
    rotate : list[float] | None
        Rotation fractions in [0, 1] for test-time augmentation.
        ``None`` means no rotations (replaces the old mutable ``[]``
        default, which is a shared-state pitfall).
    d1, d2 : int
        Forwarded to ``get_ini_cor`` for corner extraction.
    """
    # Replace the old mutable default argument (rotate=[]).
    if rotate is None:
        rotate = []

    # Check input arguments validation
    for path in glob.glob(img_glob):
        assert os.path.isfile(path), '%s not found' % path
    assert os.path.isdir(output_dir), '%s is not a directory' % output_dir
    # BUGFIX: the original wrote `for rotate in rotate`, shadowing the
    # parameter so augment() below received a single float (the last
    # element) instead of the list whenever rotations were requested.
    for r in rotate:
        assert 0 <= r <= 1, 'elements in --rotate should in [0, 1]'

    # Prepare model and copy the Torch7 pretrained weights into it.
    layoutnet = LayoutNet().to(device)
    # NOTE(review): hard-coded absolute checkpoint path — should probably
    # be derived from `path_prefix`; confirm against the deployment layout.
    torch_pretrained = torchfile.load(
        '/home/jupyter/Shapes.ai/pytorch-layoutnet/ckpt/perspfull_lsun_type_pretrained.t7'
    )
    # Sanity check: parameter count of the PyTorch model vs the t7 dump.
    total_parameter = 0
    for p in layoutnet.parameters():
        total_parameter += np.prod(p.size())
    print('pytorch parameters: ', total_parameter)
    print('t7 file: ', torch_pretrained.shape[0])
    idx = 0
    idx = copy_params(idx, layoutnet.parameters(), torch_pretrained)
    # NOTE(review): '/ckpt/...' writes to the filesystem root — confirm it
    # should not be a relative path such as 'ckpt/persp_pretrained.pth'.
    torch.save(layoutnet.state_dict(), '/ckpt/persp_pretrained.pth')

    # Load path to visualization
    img_paths = sorted(glob.glob(img_glob))

    # Process each input
    for i_path in img_paths:
        print('img  path:', i_path)

        # Load input image, resized to 512x512 and scaled to [0, 1].
        i_img = np.array(Image.open(i_path).resize(
            (512, 512)), np.float32) / 255
        x_img = i_img.transpose([2, 0, 1])  # HWC -> CHW for the network
        print('x_img shape: ', x_img.shape)

        # Augment data (test-time flip / rotations)
        x_imgs_augmented, aug_type = augment(x_img, flip, rotate)

        # Feedforward and extract output images
        with torch.no_grad():
            x = torch.FloatTensor(x_imgs_augmented).to(device)
            print('x shape: ', x.shape)
            deconv6_sf, deconv6_sf_c, ref4 = layoutnet(x)

            edg_tensor = deconv6_sf       # edge probability maps
            cor_tensor = deconv6_sf_c     # corner probability maps
            roomtype_tensor = ref4        # room-type output (unused below)

            # Recover the effect from augmentation
            edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
            cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation (mean over augmented copies)
        edgmap = edg_img.transpose([0, 2, 3, 1]).mean(0).copy()
        cormap = cor_img.transpose([0, 2, 3, 1]).mean(0)[..., 0].copy()

        # Post processing to extract layout corner ids
        cor_id = get_ini_cor(cormap, d1, d2)

        # Draw extracted layout on source image
        bon_img = draw_boundary_from_cor_id(cor_id.copy(), i_img * 255)

        # Composite all result in one image
        all_in_one = 0.3 * edgmap + 0.3 * cormap[..., None] + 0.4 * i_img
        all_in_one = draw_boundary_from_cor_id(cor_id.copy(), all_in_one * 255)

        # Dump results
        basename = os.path.splitext(os.path.basename(i_path))[0]
        path_edg = os.path.join(output_dir, '%s_edg.png' % basename)
        path_cor = os.path.join(output_dir, '%s_cor.png' % basename)
        path_bon = os.path.join(output_dir, '%s_bon.png' % basename)
        path_all_in_one = os.path.join(output_dir, '%s_all.png' % basename)
        path_cor_id = os.path.join(output_dir, '%s_cor_id.txt' % basename)

        Image.fromarray((edgmap * 255).astype(np.uint8)).save(path_edg)
        Image.fromarray((cormap * 255).astype(np.uint8)).save(path_cor)
        Image.fromarray(bon_img).save(path_bon)
        Image.fromarray(all_in_one).save(path_all_in_one)
        with open(path_cor_id, 'w') as f:
            for x, y in cor_id:
                f.write('%.6f %.6f\n' % (x, y))
Example #3
0
        # Recover the effect from augmentation
        edg_img = augment_undo(edg_tensor.cpu().numpy(), aug_type)
        cor_img = augment_undo(cor_tensor.cpu().numpy(), aug_type)

        # Merge all results from augmentation
        edg_img = edg_img.transpose([0, 2, 3, 1]).mean(0)
        cor_img = cor_img.transpose([0, 2, 3, 1]).mean(0)[..., 0]

    # Load ground truth corner label
    k = datas[-1][:-4]
    path = os.path.join(args.root_dir, 'label_cor', '%s.txt' % k)
    with open(path) as f:
        gt = np.array([line.strip().split() for line in f], np.float64)

    # Construct corner label from predicted corner map
    cor_id = get_ini_cor(cor_img, args.d1, args.d2)

    # Gradient descent optimization
    if args.post_optimization:
        cor_id = optimize_cor_id(cor_id,
                                 edg_img,
                                 cor_img,
                                 num_iters=100,
                                 verbose=False)

    # Compute normalized corner error
    cor_error = ((gt - cor_id)**2).sum(1)**0.5
    cor_error /= np.sqrt(cor_img.shape[0]**2 + cor_img.shape[1]**2)
    pe_error = eval_PE(cor_id[0::2], cor_id[1::2], gt[0::2], gt[1::2])
    iou3d = eval_3diou(cor_id[1::2], cor_id[0::2], gt[1::2], gt[0::2])
    test_losses.update('CE(%)', cor_error.mean() * 100)
Example #4
0
    # load gt
    path = './data' + filename[26:] + '.txt'

    with open(path) as f:
        gt = np.array([line.strip().split() for line in f], np.float64)

    # sort gt
    gt_id = np.argsort(gt[:, 0])
    gt = gt[gt_id, :]
    for row in range(0, gt.shape[0], 2):
        gt_id = np.argsort(gt[row:row + 2, 1])
        gt[row:row + 2, :] = gt[row:row + 2, gt_id]

    # corner error
    cor_id = get_ini_cor(cor_img, 21, 3)

    cor_id = optimize_cor_id(cor_id,
                             edg_img,
                             cor_img,
                             num_iters=100,
                             verbose=False)

    # sort cor_id
    cor_idd = np.argsort(cor_id[:, 0])
    cor_id = cor_id[cor_idd, :]
    for row in range(0, cor_id.shape[0], 2):
        cor_idd = np.argsort(cor_id[row:row + 2, 1])
        cor_id[row:row + 2, :] = cor_id[row:row + 2, cor_idd]

    cor_error = ((gt - cor_id)**2).sum(1)**0.5