Example 1
    def soft_tf(self, pc1, pc2, indexor):
        """Weighted Kabsch/Procrustes: rigid transform mapping pc1 onto pc2.

        pc1, pc2: 4 x N point clouds in homogeneous coordinates;
        indexor: per-point weights (e.g. a 0/1 inlier mask) of length N.
        """
        # Weighted centroids and weighted, centered clouds.
        pc2_centroid = torch.sum(pc2[:3, :] * indexor, -1) / torch.sum(indexor)
        pc2_centred = ((pc2[:3, :].t() - pc2_centroid).t() * indexor).t()

        pc1_centroid = torch.sum(pc1[:3, :] * indexor, -1) / torch.sum(indexor)
        pc1_centred = ((pc1[:3, :].t() - pc1_centroid).t() * indexor).t()

        # 3 x 3 cross-covariance between the two centered clouds.
        H = torch.matmul(pc1_centred.t(), pc2_centred)
        logger.debug('SVD on:')
        logger.debug(H)
        U, S, V = torch.svd(H)  # torch.svd is the legacy API (torch.linalg.svd on recent PyTorch)
        if torch.det(U) * torch.det(V) < 0:
            # Reflection case: flip the sign of the last column of V.
            V = V * V.new_tensor([[1, 1, -1], [1, 1, -1], [1, 1, -1]])

        R = torch.matmul(V, U.t())  # optimal rotation V @ U^T

        # translation
        t = pc2_centroid - torch.matmul(R, pc1_centroid)

        # homogeneous transformation
        T = pc2.new_zeros(4, 4)
        T[:3, :3] = R
        T[:3, 3] = t
        T[3, 3] = 1

        return T, utils.rot_to_quat(R), t
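soft_tf is a weighted Kabsch/Procrustes solver: it removes the weighted centroids, builds the 3 x 3 cross-covariance H, and takes R = V @ U.t() from the SVD of H, flipping the last column of V in the reflection case. A minimal usage sketch, assuming `solver` is an instance of the surrounding class; the clouds, weights, and expected output below are made-up illustration:

import torch

N = 100
pts = torch.rand(3, N)
t_gt = torch.tensor([0.5, -0.2, 1.0])                  # ground-truth translation
pc1 = torch.cat((pts, torch.ones(1, N)), 0)            # source cloud, 4 x N homogeneous
pc2 = torch.cat((pts + t_gt.unsqueeze(1), torch.ones(1, N)), 0)  # translated copy
indexor = torch.ones(N)                                # uniform weights

T, q, t = solver.soft_tf(pc1, pc2, indexor)
# T should be close to [I | t_gt; 0 0 0 1], q to the identity quaternion
# and t to t_gt.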
Example 2
    def __getitem__(self, idx):
        resource = self.data[idx]
        sample = dict()

        img_name = self.root_path + self.folders + resource[0]
        sample['rgb'] = PIL.Image.open(img_name)

        sample['K'] = np.array(self.K, dtype=np.float32)

        if self.transform:
            # Transforms under 'first' are applied to the whole sample dict;
            # the remaining entries are applied per modality, with the
            # intrinsics K passed along.
            if 'first' in self.transform:
                sample = torchvis.transforms.Compose(self.transform['first'])(sample)
            for mod in self.transform:
                if mod not in ('first',) and mod in self.used_mod:
                    sample[mod] = torchvis.transforms.Compose(self.transform[mod])({mod: sample[mod],
                                                                                    'K': sample['K']})[mod]

        t = resource[1:4].astype('float')  # translation, in meters
        q = resource[4:8].astype('float')  # orientation quaternion
        pose = np.zeros((4, 4), dtype=np.float32)
        R = putils.quat_to_rot(torch.tensor(q)).t().numpy()  # note the transpose
        pose[:3, :3] = R
        pose[:3, 3] = t
        pose[3, 3] = 1
        # Re-derive q from the stored rotation so the quaternion and the
        # matrix in the sample are guaranteed consistent.
        q = putils.rot_to_quat(torch.from_numpy(pose[:3, :3])).numpy()
        sample['pose'] = {'p': t, 'q': q, 'T': pose}

        return sample
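The returned pose packs the same transform three ways: position 'p', quaternion 'q' (re-derived from the stored rotation so both stay consistent), and the 4 x 4 homogeneous matrix 'T'. A standalone sketch of that homogeneous assembly, with a hypothetical R and t:

import numpy as np

R = np.eye(3, dtype=np.float32)                  # hypothetical rotation
t = np.array([1.0, 2.0, 3.0], dtype=np.float32)  # hypothetical translation
pose = np.zeros((4, 4), dtype=np.float32)
pose[:3, :3] = R
pose[:3, 3] = t
pose[3, 3] = 1
# pose @ [x, y, z, 1] rotates by R, then translates by t.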
Example 3
    def directtf(self, pc1, pc2, indexor):
        # Keep one match per unique pc2 x-coordinate.
        _, indices = torch.unique(pc2[0, :], return_inverse=True)
        unique_indices = torch.unique(indices)
        #M = pc1.new_zeros(indexor.nonzero().numel() * 2, 12)
        # Three equations per valid match: one per output coordinate.
        A = pc1.new_zeros(indexor[unique_indices].nonzero().numel() * 3, 12)
        B = pc1.new_zeros(indexor[unique_indices].nonzero().numel() * 3, 1)

        if indexor.nonzero().numel() < 3:
            logger.warning(
                'Not enough correspondences to use dlt ({} matches)'.format(
                    indexor.nonzero().numel()))
        cpt = 0
        for i in unique_indices:
            if indexor[i].item():
                # One [x y z 1] block per row of the 3 x 4 matrix P.
                A[cpt, :4] = pc1[:, i]
                A[cpt + 1, 4:8] = pc1[:, i]
                A[cpt + 2, 8:] = pc1[:, i]
                B[cpt] = pc2[0, i]
                B[cpt + 1] = pc2[1, i]
                B[cpt + 2] = pc2[2, i]

                cpt += 3

        # Least-squares solve of A @ X = B; torch.gels is the legacy API
        # (torch.linalg.lstsq on recent PyTorch) and returns the solution in
        # the first 12 rows of X.
        X, _ = torch.gels(B, A)
        X = X[:12]

        P = X.view(3, 4)
        T = pc2.new_zeros(4, 4)
        T[:3, :] = P
        T[3, 3] = 1
        #R = T[:3, :3]
        #R = utils.quat_to_rot(utils.rot_to_quat(T[:3, :3]))

        # Project the estimated linear part onto the closest rotation matrix.
        R = normalize_rotmat(T[:3, :3])
        if torch.det(R) < 0:
            print('Inverse')
            R *= -1

        T[:3, :3] = R

        return T, utils.rot_to_quat(T[:3, :3]), T[:3, 3]
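directtf estimates the 3 x 4 matrix P by ordinary least squares, stacking one [x y z 1] block per output coordinate, then snaps the linear part to a rotation. A self-contained sketch of the same system with the current PyTorch API (torch.gels above is the legacy name); the data are made up:

import torch

N = 50
pts = torch.rand(N, 3)
t_gt = torch.tensor([0.1, 0.0, -0.3])
hom = torch.cat((pts, torch.ones(N, 1)), 1)  # N x 4 rows [x y z 1]
A = torch.zeros(3 * N, 12)
A[0::3, 0:4] = hom                           # x equations -> first row of P
A[1::3, 4:8] = hom                           # y equations -> second row
A[2::3, 8:12] = hom                          # z equations -> third row
b = (pts + t_gt).reshape(-1, 1)              # target coordinates, interleaved
P = torch.linalg.lstsq(A, b).solution.view(3, 4)
# P[:, :3] is close to the identity and P[:, 3] to t_gt.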
Example 4
    # Plot the reference cloud (blue) and the candidate cloud (red).
    utils.plt_pc(pc_ref, ax, pas, 'b')
    utils.plt_pc(pcs[idx], ax, pas, 'r')

    plt.show()

    pc_ref.requires_grad = False
    #pc_ref = torch.load('base_model.pth')
    pose = poses[1][:3, :]

    im_fwd = ims[1].unsqueeze(0)
    #net = init_net()
    #net = small_init_net()
    # Hourglass network: pixel encoder + decoder with one output channel.
    net = nn.Sequential(
        CA.PixEncoder(k_size=4, d_fact=4),
        CA.PixDecoder(k_size=4, d_fact=4, out_channel=1, div_fact=2))
    q = utils.rot_to_quat(pose[:3, :3])
    t = pose[:3, 3]

    optimizer = optim.Adam(net.parameters(), lr=1e-4, weight_decay=1e-3)
    #net.register_backward_hook(module_hook)
    it = 10000   # optimization iterations
    n_hyp = 1    # number of pose hypotheses
    tt_loss = list()
    #nb_pt_total = int(640 * 480 * scale**2)
    nb_pt_total = int(480 * 480 * scale**2)  # point budget of a rescaled frame
    param_icp = {'iter': 3, 'fact': 2, 'dnorm': False, 'outlier': False}
    n_pt = int(0.05 * nb_pt_total)           # sample 5% of the point budget

    init_pose = torch.eye(4, 4)
    for i in tqdm.tqdm(range(it)):
        optimizer.zero_grad()
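From the visible setup: an hourglass network (CA.PixEncoder into CA.PixDecoder, one output channel) is optimized with Adam (lr 1e-4, weight decay 1e-3) for 10000 iterations, with a single pose hypothesis, 5% of the nb_pt_total point budget per step, and a light 3-iteration ICP configured by param_icp; the rest of the loop body is not shown in this excerpt.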
Example 5
def creat_new_sample(sample, zoom=0.2, reduce_fact=2, tilte_angle=1, final_size_depth_map=56):
    """Synthesize a new view of `sample` under a random in-FOV camera motion.

    The depth map and RGB image are reprojected through a random rotation
    (bounded so the scene stays visible) and a random translation along z,
    at 1/reduce_fact of the original resolution; the output depth map is
    resized to final_size_depth_map.
    """
    Q = torch.tensor([[1.0, 0, 0, 0],
                      [0, 1.0, 0, 0],
                      [0, 0, 1.0, 0]])

    K = torch.from_numpy(sample['K'])
    new_K = K.clone().detach()
    new_K[:2, :] /= reduce_fact

    # Random motion bounded so the reprojected scene stays inside the view.
    D = torch.max(sample['depth'].view(-1))   # max scene depth
    fov = 2*torch.atan2(K[0, 2], 2*K[0, 0])
    z_offset = random.random()*D*zoom         # random forward translation
    max_angle = torch.atan2(D*torch.tan(fov/2), D - z_offset) - fov/2
    theta_x = random.choice([1, -1]) * (random.random() * 0.5 + 0.5) * max_angle

    theta_y = random.choice([1, -1]) * (random.random() * 0.5 + 0.5) * max_angle
    theta_z = random.choice([1, -1]) * random.random() * tilte_angle

    new_pose = torch.eye(4, 4)
    new_pose[2, 3] = -z_offset
    new_pose[:3, :3] = utils.rotation_matrix(torch.tensor([1.0, 0, 0]), torch.tensor([theta_x])).matmul(
        new_pose[:3, :3])
    new_pose[:3, :3] = utils.rotation_matrix(torch.tensor([0, 1.0, 0]), torch.tensor([theta_y])).matmul(
        new_pose[:3, :3])
    new_pose[:3, :3] = utils.rotation_matrix(torch.tensor([0, 0, 1.0]), torch.tensor([theta_z])).matmul(
        new_pose[:3, :3])

    # sample['rgb'] is (C, H, W); with the original naming, w holds the image
    # height and h the width (the indexing below is consistent with this).
    _, w, h = sample['rgb'].size()
    w = round(w/reduce_fact)
    h = round(h/reduce_fact)

    ori_pc, _ = utils.depth_map_to_pc(sample['depth'], K, remove_zeros=True)
    move_pc = new_pose.matmul(ori_pc)
    new_depth_maps = torch.zeros(1, w, h)
    repro = new_K.matmul(Q.matmul(move_pc))
    coord = (repro[:2] / repro[2]).round().long()
    coord[0, :] = coord[0, :].clamp(min=0, max=h-1)
    coord[1, :] = coord[1, :].clamp(min=0, max=w-1)
    #flat_coord = coord[0, :]*h + coord[1, :]
    #u_flat_coord = torch.unique(flat_coord)
    # Scatter depths into the new map; colliding pixels are resolved
    # arbitrarily (no z-buffering; the commented-out block below sketches a
    # proper minimum-depth resolution).
    new_depth_maps[:, coord[1, :], coord[0, :]] = repro[2, :]

    # Fill the holes of the source depth map, then reproject the RGB image.
    gap_filled_depth = remove_gap(sample['depth'], sample['depth'].view(-1)==0)
    ori_pc, _ = utils.depth_map_to_pc(gap_filled_depth, K, remove_zeros=False)
    move_pc = new_pose.matmul(ori_pc)
    repro = new_K.matmul(Q.matmul(move_pc))
    coord = (repro[:2] / repro[2]).round().long()
    coord[0, :] = coord[0, :].clamp(min=0, max=h - 1)
    coord[1, :] = coord[1, :].clamp(min=0, max=w - 1)
    new_image = torch.zeros(3, w, h)
    colors = sample['rgb'].view(3, -1)
    new_image[:, coord[1, :], coord[0, :]] = colors
    new_image = remove_gap(new_image,
                           ((new_image[0, :, :] + new_image[1, :, :] + new_image[2, :, :]) == 0).view(-1))

    '''
    for fidx in u_flat_coord:
        indexor = fidx == flat_coord
        if torch.sum(indexor) > 1:
            idx_min = torch.argmin(repro[2, indexor])
            idx_h = flat_coord[idx_min]//h
            idx_w = flat_coord[idx_min] - idx_h*h
            new_depth_maps[:, idx_w, idx_h] = repro[2, idx_min]
            new_image[:, idx_w, idx_h] = colors[:, idx_min]
    '''
    combined_new_pose = new_pose.matmul(torch.from_numpy(sample['pose']['T']))
    full_new_pose = {
        'T': combined_new_pose.numpy(),
        'position': new_pose[:3, 3].numpy(),
        'orientation': utils.rot_to_quat(new_pose[:3, :3]).numpy(),
    }

    return {'depth': torch.nn.functional.interpolate(new_depth_maps.unsqueeze(0),
                                                     size=final_size_depth_map, mode='nearest').squeeze(0),
            'rgb': new_image,
            'K': new_K.numpy(),
            'pose': full_new_pose}
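A hedged sketch of a call, assuming the repo's utils helpers (depth_map_to_pc, rotation_matrix, rot_to_quat) and remove_gap are importable; the sample layout is inferred from what the function reads, and the sizes and intrinsics are placeholders:

import numpy as np
import torch

sample = {
    'rgb': torch.rand(3, 480, 640),             # C x H x W image
    'depth': torch.rand(1, 480, 640) * 5.0,     # placeholder depth, in meters
    'K': np.array([[585.0, 0.0, 320.0],
                   [0.0, 585.0, 240.0],
                   [0.0, 0.0, 1.0]], dtype=np.float32),
    'pose': {'T': np.eye(4, dtype=np.float32)},
}
new_sample = creat_new_sample(sample, zoom=0.2, reduce_fact=2)
# new_sample['rgb'] is 3 x 240 x 320, new_sample['depth'] is 1 x 56 x 56, and
# new_sample['pose']['T'] composes the random motion with the original pose.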
Example 6
    def dlt(self, pc1, pc2, indexor):
        # Hartley-style normalization: center each cloud and scale each axis
        # by 1/std so the homogeneous system below is well conditioned.
        std, mean = torch.std(pc2[:3, :], 1), torch.mean(pc2[:3, :], 1)
        T2std, T2mean = pc2.new_zeros(4, 4), pc2.new_zeros(4, 4)
        T2mean[0, 0] = T2mean[1, 1] = T2mean[2, 2] = T2mean[3, 3] = 1
        T2mean[:3, 3] = -mean
        T2std[3, 3] = 1
        for i in range(3):
            T2std[i, i] = 1 / std[i]
        T2 = torch.matmul(T2std, T2mean)

        pc2 = T2.matmul(pc2)

        std, mean = torch.std(pc1[:3, :], 1), torch.mean(pc1[:3, :], 1)
        T1std, T1mean = pc1.new_zeros(4, 4), pc1.new_zeros(4, 4)
        T1mean[0, 0] = T1mean[1, 1] = T1mean[2, 2] = T1mean[3, 3] = 1
        T1mean[:3, 3] = -mean
        T1std[3, 3] = 1
        for i in range(3):
            T1std[i, i] = 1 / std[i]
        T1 = torch.matmul(T1std, T1mean)

        pc1 = T1.matmul(pc1)

        # Keep one match per unique pc2 x-coordinate.
        _, indices = torch.unique(pc2[0, :], return_inverse=True)
        unique_indices = torch.unique(indices)
        #M = pc1.new_zeros(indexor.nonzero().numel() * 2, 12)
        # Two equations per valid match (the third cross-product row is
        # linearly dependent on the other two).
        M = pc1.new_zeros(indexor[unique_indices].nonzero().numel() * 2, 12)
        print('Processing {} unique matches'.format(unique_indices.numel()))
        if indexor[unique_indices].nonzero().numel() < 6:
            logger.warning(
                'Not enough correspondences to use dlt ({} matches)'.format(
                    indexor[unique_indices].nonzero().numel()))
        cpt = 0
        for i in unique_indices:
            if indexor[i].item():
                # Two rows of the constraint pc2 x (P @ pc1) = 0 per match.
                M[cpt, 4:] = torch.cat(
                    (-pc2[2, i] * pc1[:, i], pc2[1, i] * pc1[:, i]), 0)
                M[cpt + 1, :4] = pc2[2, i] * pc1[:, i]
                M[cpt + 1, 8:] = -pc2[0, i] * pc1[:, i]
                #                M[cpt + 2, :8] = torch.cat((-pt[1]*pc12[:, i], pt[0]*pc1[:, i]), 0)
                cpt += 2

        # Null vector of M: the right singular vector with the smallest
        # singular value (last column of V).
        U, S, V = torch.svd(M)
        p = V[:, -1]

        if p[10].item() < 0:  # Diag of rot mat should be > 0
            print('inverse')
            p = p * -1

        # Fix the overall scale so the third row of the rotation part of P
        # has unit norm.
        norm = (p[8]**2 + p[9]**2 + p[10]**2)**0.5
        p = p / norm
        P = p.view(3, 4)

        # homogeneous transformation
        T = pc2.new_zeros(4, 4)
        T[:3, :] = P
        T[3, 3] = 1
        #T = T1.inverse().matmul(T.matmul(T2))
        # De-normalize: T was estimated between the whitened clouds.
        T = T2.inverse().matmul(T.matmul(T1))
        print(T[:3, :3].matmul(T[:3, :3].t()))  # debug: should be near identity
        T[:3, :3] = normalize_rotmat(T[:3, :3])
        return T, utils.rot_to_quat(T[:3, :3]), T[:3, 3]
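Unlike directtf, dlt uses the classical homogeneous formulation: each match contributes two rows of the cross-product constraint pc2 x (P @ pc1) = 0 (the commented-out third row is linearly dependent on the other two), the null vector of the stacked system gives P up to scale, and the Hartley-style whitening (T1, T2) is undone at the end via T2^-1 @ T @ T1. A call looks like the soft_tf sketch above, e.g. solver.dlt(pc1, pc2, indexor), with at least 6 valid matches.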
Example 7
def rot_to_quat(m):
    # NumPy-friendly wrapper around the torch implementation in putils.
    return putils.rot_to_quat(torch.tensor(m)).numpy()
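A trivial round trip; that putils uses a w-first quaternion convention is an assumption, not something the snippet shows:

import numpy as np

q = rot_to_quat(np.eye(3, dtype=np.float32))
# For the identity rotation, q should be the identity quaternion,
# e.g. [1, 0, 0, 0] under a w-first (w, x, y, z) convention.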
Example 8
        # Snap the loaded matrix to a valid rotation by round-tripping
        # through a normalized quaternion.
        quat = custom_q.Quaternion(matrix=rot)
        quat._normalise()
        rot = torch.FloatTensor(quat.rotation_matrix)
        pose[:3, :3] = rot

        poses.append(pose)

        pcs.append(utils.toSceneCoord(depth, pose, K, remove_zeros=True))

    # Reference cloud from frames 0 and 2; frame 1 is held out as the query.
    pc_ref = torch.cat((pcs[0], pcs[2]), 1)
    pc_ref.requires_grad = False
    pose = poses[1][:3, :]

    im_fwd = ims[1].unsqueeze(0)
    net = init_net()
    q = utils.rot_to_quat(pose[:3, :3])
    t = pose[:3, 3]

    optimizer = optim.Adam(net.parameters())
    #net.register_backward_hook(module_hook)
    it = 1000
    n_pt = 150
    n_hyp = 50
    t_fact = 1
    inlier_alpha = 10
    param_icp = {'iter': 3, 'fact': 2, 'dnorm': True, 'outlier': False}

    tt_loss = list()
    nb_pt_total = int(640 * 480 * scale**2)

    init_pose = torch.eye(4, 4)
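This fragment mirrors Example 4 with different settings: each loaded rotation is snapped to a valid matrix through a normalized pyquaternion, the reference cloud concatenates frames 0 and 2, and frame 1 is held out as the query whose pose (q, t) is the target. Sampling n_pt = 150 points with n_hyp = 50 hypotheses and an inlier_alpha score weight suggests a RANSAC-style hypothesize-and-verify loop, with ICP refinement configured by param_icp.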