def test_quaternion_multiplication(self):
    """Quaternion and matrix multiplication are equivalent."""
    a = random_quaternions(15, torch.float64).reshape((3, 5, 4))
    b = random_quaternions(21, torch.float64).reshape((7, 3, 1, 4))
    ab = quaternion_multiply(a, b)
    self.assertEqual(ab.shape, (7, 3, 5, 4))
    a_matrix = quaternion_to_matrix(a)
    b_matrix = quaternion_to_matrix(b)
    ab_matrix = torch.matmul(a_matrix, b_matrix)
    ab_from_matrix = matrix_to_quaternion(ab_matrix)
    self._assert_quaternions_close(ab, ab_from_matrix)

def test_quat_grad_exists(self):
    """Quaternion calculations are differentiable."""
    rotation = random_rotation()
    rotation.requires_grad = True
    modified = quaternion_to_matrix(matrix_to_quaternion(rotation))
    [g] = torch.autograd.grad(modified.sum(), rotation)
    self.assertTrue(torch.isfinite(g).all())
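
A minimal standalone sketch (assuming pytorch3d.transforms is importable) of the batch broadcasting that the multiplication test above relies on:

import torch
from pytorch3d.transforms import quaternion_multiply, random_quaternions

a = random_quaternions(15, dtype=torch.float64).reshape(3, 5, 4)  # batch shape (3, 5)
b = random_quaternions(3, dtype=torch.float64).reshape(3, 1, 4)   # batch shape (3, 1)
ab = quaternion_multiply(a, b)  # broadcasts like elementwise torch ops
print(ab.shape)                 # torch.Size([3, 5, 4])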
Example #3
def interpolate_cameras(C, R):
    # Nothing to interpolate if every camera center is already finite.
    if torch.isfinite(C).all():
        return C.clone(), R.clone()

    import numpy as np
    from pytorch3d.transforms import rotation_conversions
    from scipy.interpolate import interp1d

    quats = rotation_conversions.matrix_to_quaternion(R)

    n_frames = C.shape[0]
    y = torch.cat((quats, C), dim=1).numpy()
    x = torch.arange(n_frames).float().numpy()
    ok = np.isfinite(y.mean(1))

    fi = interp1d(
        x[ok], y[ok], kind='linear',
        bounds_error=False, axis=0,
        fill_value=(y[ok][0], y[ok][-1])
    )

    y_interp = fi(x)

    i_quats = torch.tensor(y_interp[:, :4]).float()
    i_R = rotation_conversions.quaternion_to_matrix(i_quats)
    i_C = torch.tensor(y_interp[:, 4:]).float()

    return i_C, i_R
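
A hedged usage sketch for interpolate_cameras; the NaN row stands in for a frame whose camera failed to register, and the identity rotations are placeholders:

import torch

n = 6
C = torch.randn(n, 3)
C[2] = float("nan")                       # missing camera center for frame 2
R = torch.eye(3).expand(n, 3, 3).clone()  # placeholder rotations
C_i, R_i = interpolate_cameras(C, R)
assert torch.isfinite(C_i).all() and torch.isfinite(R_i).all()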
Example #4
def evimo_to_pytorch3d_Rotation(self, p: dict):
    pos_q = torch.Tensor([float(e) for e in p['q'].values()])
    pos_R = rc.quaternion_to_matrix(pos_q)
    pos_R = pos_R.transpose(1, 0)
    # Re-index into the PyTorch3D frame: this equals S @ pos_R @ S,
    # where S swaps the x and y axes and negates z.
    R = torch.zeros((3, 3))
    R[0, 0], R[0, 1], R[0, 2] = pos_R[1, 1], pos_R[1, 0], -pos_R[1, 2]
    R[1, 0], R[1, 1], R[1, 2] = pos_R[0, 1], pos_R[0, 0], -pos_R[0, 2]
    R[2, 0], R[2, 1], R[2, 2] = -pos_R[2, 1], -pos_R[2, 0], pos_R[2, 2]
    return R
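
A small standalone check (assuming pytorch3d is available) that the index shuffling above is the fixed change of basis S @ pos_R @ S, with S swapping the x and y axes and negating z:

import torch
from pytorch3d.transforms import random_rotations

S = torch.tensor([[0.0, 1.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 0.0, -1.0]])
pos_R = random_rotations(1)[0].transpose(1, 0)
R_manual = torch.stack([
    torch.stack([pos_R[1, 1], pos_R[1, 0], -pos_R[1, 2]]),
    torch.stack([pos_R[0, 1], pos_R[0, 0], -pos_R[0, 2]]),
    torch.stack([-pos_R[2, 1], -pos_R[2, 0], pos_R[2, 2]]),
])
assert torch.allclose(R_manual, S @ pos_R @ S)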
Example #5
    def test_quaternion_application(self):
        """Applying a quaternion is the same as applying the matrix."""
        quaternions = random_quaternions(3, torch.float64, requires_grad=True)
        matrices = quaternion_to_matrix(quaternions)
        points = torch.randn(3, 3, dtype=torch.float64, requires_grad=True)
        transform1 = quaternion_apply(quaternions, points)
        transform2 = torch.matmul(matrices, points[..., None])[..., 0]
        self.assertTrue(torch.allclose(transform1, transform2))

        [p, q] = torch.autograd.grad(transform1.sum(), [points, quaternions])
        self.assertTrue(torch.isfinite(p).all())
        self.assertTrue(torch.isfinite(q).all())
    def test_matrix_to_quaternion_by_pi(self):
        # We check that rotations by pi around each of the 26
        # nonzero vectors containing nothing but 0, 1 and -1
        # are mapped to the right quaternions.
        # This is representative across the directions.
        options = [0.0, -1.0, 1.0]
        axes = [
            torch.tensor(vec)
            for vec in itertools.islice(  # exclude [0, 0, 0]
                itertools.product(options, options, options), 1, None
            )
        ]

        axes = torch.nn.functional.normalize(torch.stack(axes), dim=-1)
        # Rotation by pi around unit vector x is given by
        # the matrix 2 x x^T - Id.
        R = 2 * torch.matmul(axes[..., None], axes[..., None, :]) - torch.eye(3)
        quats_hat = matrix_to_quaternion(R)
        R_hat = quaternion_to_matrix(quats_hat)
        self.assertClose(R, R_hat, atol=1e-3)
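
A quick numeric check of the identity the comment above uses (a half-turn about a unit axis x has matrix 2*x*x^T - I), for x = (1, 0, 0):

import torch

x = torch.tensor([1.0, 0.0, 0.0])
R = 2 * torch.outer(x, x) - torch.eye(3)
# R is diag(1, -1, -1): x stays fixed while y and z are negated,
# exactly a rotation by pi about the x axis.
print(R)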
Example #7
def calc_location(box_2d, proj_matrix, dimension, quaternion, gt_trans):
    # global orientation
    R = quaternion_to_matrix(th.as_tensor(quaternion)).cpu().numpy()

    # format 2d corners
    # box_2d is (top, left, height, width)
    xmin = box_2d[1]
    xmax = (box_2d[3] + xmin)
    ymin = box_2d[0]
    ymax = (box_2d[2] + ymin)

    # left top right bottom

    dx = dimension[0] / 2
    dy = dimension[1] / 2
    dz = dimension[2] / 2

    vertices = []
    for i in (-1, 1):
        for j in (-1, 1):
            for k in (-1, 1):
                vertices.append([i * dx, j * dy, k * dz])

    constraints = list(itertools.permutations(vertices, 4))

    # create pre M (the term with I and the R*X)
    pre_M = np.zeros([4, 4])
    # 1's down diagonal
    for i in range(0, 4):
        pre_M[i][i] = 1

    best_loc = None
    best_error = [np.inf]
    best_X = None

    # loop through each possible constraint, hold on to the best guess
    count = 0
    for constraint in constraints:
        # each corner
        Xa = constraint[0]
        Xb = constraint[1]
        Xc = constraint[2]
        Xd = constraint[3]

        X_array = [Xa, Xb, Xc, Xd]

        # M: identity on the diagonal; the upper-right 3x1 block is Rotation_matrix * [x, y, z]
        Ma = np.copy(pre_M)
        Mb = np.copy(pre_M)
        Mc = np.copy(pre_M)
        Md = np.copy(pre_M)

        M_array = [Ma, Mb, Mc, Md]

        # create A, b
        A = np.zeros([4, 3], dtype=np.float64)
        b = np.zeros([4, 1])

        indices = [0, 1, 0, 1]
        for row, index in enumerate(indices):
            X = X_array[row]
            M = M_array[row]

            # create M for corner Xx
            RX = np.dot(R, X)
            M[:3, 3] = RX.reshape(3)

            K = proj_matrix[:3, :]
            M = np.dot(K, M)

            # ref: http://ywpkwon.github.io/pdf/bbox3d-study.pdf
            A[row, :] = M[index, :3] - box_2d[row] * M[2, :3]
            b[row] = box_2d[row] * M[2, 3] - M[index, 3]

        # solve with least squares; the system is overdetermined, so expect a nonzero residual
        loc, error, rank, s = np.linalg.lstsq(A, b, rcond=None)

        # found a better estimation
        if error < best_error:
            count += 1  # for debugging
            best_loc = loc
            best_error = error
            best_X = X_array

    # return best_loc, [left_constraints, right_constraints] # for debugging
    best_loc = [best_loc[0][0], best_loc[1][0], best_loc[2][0]]
    print("lstsq error:", best_error)
    return best_loc, best_X
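
A hedged usage sketch for calc_location; it assumes the module-level imports the function relies on (numpy as np, torch as th, itertools, and pytorch3d's quaternion_to_matrix) are in place, and every numeric value below is illustrative only:

import numpy as np
import torch as th

proj_matrix = np.array([[700.0, 0.0, 320.0, 0.0],
                        [0.0, 700.0, 240.0, 0.0],
                        [0.0, 0.0, 1.0, 0.0]])
box_2d = (120, 200, 80, 160)                   # (top, left, height, width)
dimension = (1.5, 1.6, 3.9)                    # half-extents are derived inside
quaternion = th.tensor([1.0, 0.0, 0.0, 0.0])   # identity rotation (w, x, y, z)

loc, X = calc_location(box_2d, proj_matrix, dimension, quaternion,
                       gt_trans=None)          # gt_trans is not used by the body above
print(loc)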
Example #8
def test_to_quat(self):
    """mtx -> quat -> mtx"""
    data = random_rotations(13, dtype=torch.float64)
    mdata = quaternion_to_matrix(matrix_to_quaternion(data))
    self.assertTrue(torch.allclose(data, mdata))
Example #9
def test_from_quat(self):
    """quat -> mtx -> quat"""
    data = random_quaternions(13, dtype=torch.float64)
    mdata = matrix_to_quaternion(quaternion_to_matrix(data))
    self.assertTrue(torch.allclose(data, mdata))
Example #10
def pred_synth(segpose,
               params: Params,
               mesh_type: str = "dolphin",
               device: str = "cuda"):

    if not params.pred_dir or not os.path.exists(params.pred_dir):
        raise FileNotFoundError(
            "Prediction directory has not been set or does not exist; set it via cli args or params"
        )
    pred_folders = [
        join(params.pred_dir, f) for f in os.listdir(params.pred_dir)
    ]
    count = 1
    for p in sorted(pred_folders):
        try:
            print(p)
            manager = RenderManager.from_path(p)
            manager.rectify_paths(base_folder=params.pred_dir)
        except FileNotFoundError:
            continue
        # Run Silhouette Prediction Network
        logging.info(f"Starting mask predictions")
        mask_priors = []
        R_pred, T_pred = [], []
        q_loss, t_loss = 0, 0
        # Collect Translation stats
        R_gt, T_gt = manager._trajectory
        poses_gt = EvMaskPoseDataset.preprocess_poses(manager._trajectory)
        std_T, mean_T = torch.std_mean(T_gt)
        for idx in range(len(manager)):
            try:
                ev_frame = manager.get_event_frame(idx)
            except Exception as e:
                print(e)
                break
            mask_pred, pose_pred = predict_segpose(segpose, ev_frame,
                                                   params.threshold_conf,
                                                   params.img_size)
            # mask_pred = smooth_predicted_mask(mask_pred)
            manager.add_pred(idx, mask_pred, "silhouette")
            mask_priors.append(torch.from_numpy(mask_pred))

            # Make qexp a torch function
            # q_pred = qexp(pose_pred[:, 3:])
            # q_targ = qexp(poses_gt[idx, 3:].unsqueeze(0))
            ####  SHOULD THIS BE NORMALIZED ??
            q_pred = pose_pred[:, 3:]
            q_targ = poses_gt[idx, 3:]

            q_pred_unit = q_pred / torch.norm(q_pred)
            q_targ_unit = q_targ / torch.norm(q_targ)
            # print("learnt: ", q_pred_unit, q_targ_unit)

            t_pred = pose_pred[:, :3] * std_T + mean_T
            t_targ = poses_gt[idx, :3] * std_T + mean_T
            T_pred.append(t_pred)

            q_loss += quaternion_angular_error(q_pred_unit, q_targ_unit)
            t_loss += t_error(t_pred, t_targ)

            r_pred = rc.quaternion_to_matrix(q_pred).unsqueeze(0)
            R_pred.append(r_pred.squeeze(0))

        q_loss_mean = q_loss / (idx + 1)
        t_loss_mean = t_loss / (idx + 1)

        # Convert R, T to world-to-view transforms (PyTorch3D camera convention)

        R_pred_abs = torch.cat(R_pred)
        T_pred_abs = torch.cat(T_pred)
        # Take inverse of view-to-world (output of net) to obtain w2v
        wtv_trans = (get_world_to_view_transform(
            R=R_pred_abs, T=T_pred_abs).inverse().get_matrix())
        T_pred = wtv_trans[:, 3, :3]
        R_pred = wtv_trans[:, :3, :3]
        R_pred_test = look_at_rotation(T_pred_abs)
        T_pred_test = -torch.bmm(R_pred_test.transpose(1, 2),
                                 T_pred_abs[:, :, None])[:, :, 0]
        # Convert back to view-to-world to get absolute
        vtw_trans = (get_world_to_view_transform(
            R=R_pred_test, T=T_pred_test).inverse().get_matrix())
        T_pred_trans = vtw_trans[:, 3, :3]
        R_pred_trans = vtw_trans[:, :3, :3]

        # Calc pose error for this:
        q_loss_mean_test = 0
        t_loss_mean_test = 0
        for idx in range(len(R_pred_test)):
            q_pred_trans = rc.matrix_to_quaternion(R_pred_trans[idx]).squeeze()
            q_targ = poses_gt[idx, 3:]
            q_targ_unit = q_targ / torch.norm(q_targ)
            # print("look: ", q_test, q_targ)
            q_loss_mean_test += quaternion_angular_error(
                q_pred_trans, q_targ_unit)
            t_targ = poses_gt[idx, :3] * std_T + mean_T
            t_loss_mean_test += t_error(T_pred_trans[idx], t_targ)
        q_loss_mean_test /= idx + 1
        t_loss_mean_test /= idx + 1

        logging.info(
            f"Mean Translation Error: {t_loss_mean}; Mean Rotation Error: {q_loss_mean}"
        )
        logging.info(
            f"Mean Translation Error: {t_loss_mean_test}; Mean Rotation Error: {q_loss_mean_test}"
        )

        # Plot estimated cameras
        logging.info(f"Plotting pose map")
        idx = random.sample(range(len(R_gt)), k=2)
        pose_plot = plot_cams_from_poses(
            (R_gt[idx], T_gt[idx]), (R_pred[idx], T_pred[idx]), params.device)
        pose_plot_test = plot_cams_from_poses(
            (R_gt[idx], T_gt[idx]), (R_pred_test[idx], T_pred_test[idx]),
            params.device)
        manager.add_pose_plot(pose_plot, "rot+trans")
        manager.add_pose_plot(pose_plot_test, "trans")

        count += 1
        groundtruth_silhouettes = manager._images("silhouette") / 255.0
        print(groundtruth_silhouettes.shape)
        print(torch.stack((mask_priors)).shape)
        seg_iou = neg_iou_loss(groundtruth_silhouettes,
                               torch.stack((mask_priors)) / 255.0)
        print("Seg IoU", seg_iou)

        # RUN MESH DEFORMATION
        # Run it 3 times: w/ Rot+Trans - w/ Trans+LookAt - w/ GT Pose
        experiments = {
            "GT-Pose": [R_gt, T_gt],
            # "Rot+Trans": [R_pred, T_pred],
            # "Trans+LookAt": [R_pred_test, T_pred_test]
        }

        results = {}
        input_m = torch.stack((mask_priors))

        for i in range(len(experiments.keys())):

            logging.info(
                f"Input pred shape & max: {input_m.shape}, {input_m.max()}")
            # The MeshDeformation model will return silhouettes across all views by default

            mesh_model = MeshDeformationModel(device=device, params=params)

            R, T = list(experiments.values())[i]
            experiment_results = mesh_model.run_optimization(input_m, R, T)
            renders = mesh_model.render_final_mesh((R, T), "predict",
                                                   input_m.shape[-2:])

            mesh_silhouettes = renders["silhouettes"].squeeze(1)
            mesh_images = renders["images"].squeeze(1)
            experiment_name = list(experiments.keys())[i]
            for idx in range(len(mesh_silhouettes)):
                manager.add_pred(
                    idx,
                    mesh_silhouettes[idx].cpu().numpy(),
                    "silhouette",
                    destination=f"mesh_{experiment_name}",
                )
                manager.add_pred(
                    idx,
                    mesh_images[idx].cpu().numpy(),
                    "phong",
                    destination=f"mesh_{experiment_name}",
                )

            # Calculate chamfer loss:
            mesh_pred = mesh_model._final_mesh
            if mesh_type == "dolphin":
                path = "data/meshes/dolphin/dolphin.obj"
                mesh_gt = load_objs_as_meshes(
                    [path],
                    create_texture_atlas=False,
                    load_textures=True,
                    device=device,
                )
            # Shapenet Cars
            elif mesh_type == "shapenet":
                mesh_info = manager.metadata["mesh_info"]
                path = os.path.join(
                    params.gt_mesh_path,
                    f"ShapeNetCorev2/{mesh_info['synset_id']}/{mesh_info['mesh_id']}/models/model_normalized.obj"
                )
                # path = f"data/ShapeNetCorev2/{mesh_info['synset_id']}/{mesh_info['mesh_id']}/models/model_normalized.obj"
                try:
                    verts, faces, aux = load_obj(path,
                                                 load_textures=True,
                                                 create_texture_atlas=True)

                    mesh_gt = Meshes(
                        verts=[verts],
                        faces=[faces.verts_idx],
                        textures=TexturesAtlas(atlas=[aux.texture_atlas]),
                    ).to(device)
                except Exception:
                    mesh_gt = None
                    print("CANNOT COMPUTE CHAMFER LOSS")
            if mesh_gt:
                mesh_pred_compute, mesh_gt_compute = scale_meshes(
                    mesh_pred.clone(), mesh_gt.clone())
                pcl_pred = sample_points_from_meshes(mesh_pred_compute,
                                                     num_samples=5000)
                pcl_gt = sample_points_from_meshes(mesh_gt_compute,
                                                   num_samples=5000)
                chamfer_loss = chamfer_distance(pcl_pred,
                                                pcl_gt,
                                                point_reduction="mean")
                print("CHAMFER LOSS: ", chamfer_loss)
                experiment_results["chamfer_loss"] = (
                    chamfer_loss[0].cpu().detach().numpy().tolist())

            mesh_iou = neg_iou_loss(groundtruth_silhouettes, mesh_silhouettes)

            experiment_results["mesh_iou"] = mesh_iou.cpu().numpy().tolist()

            results[experiment_name] = experiment_results

            manager.add_pred_mesh(mesh_pred, experiment_name)

        seg_iou = neg_iou_loss_all(groundtruth_silhouettes, input_m / 255.0)
        gt_iou = neg_iou_loss_all(groundtruth_silhouettes,
                                  groundtruth_silhouettes)

        results["mesh_iou"] = mesh_iou.detach().cpu().numpy().tolist()
        results["seg_iou"] = seg_iou.detach().cpu().numpy().tolist()
        logging.info(f"Mesh IOU list & results: {mesh_iou}")
        logging.info(f"Seg IOU list & results: {seg_iou}")
        logging.info(f"GT IOU list & results: {gt_iou} ")

        # results["mean_iou"] = IOULoss().forward(groundtruth, mesh_silhouettes).detach().cpu().numpy().tolist()
        # results["mean_dice"] = DiceCoeffLoss().forward(groundtruth, mesh_silhouettes)

        manager.set_pred_results(results)
        manager.close()
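
The pose post-processing inside pred_synth builds a transform with get_world_to_view_transform, inverts it, and reads R and T back out of the 4x4 matrix; a minimal sketch of that step in isolation (assuming pytorch3d is installed, with random placeholder poses):

import torch
from pytorch3d.renderer.cameras import get_world_to_view_transform
from pytorch3d.transforms import random_rotations

R_in = random_rotations(4)   # placeholder rotations
T_in = torch.randn(4, 3)     # placeholder translations

inv = get_world_to_view_transform(R=R_in, T=T_in).inverse().get_matrix()
R_out = inv[:, :3, :3]       # rotation block of the inverted transform
T_out = inv[:, 3, :3]        # translation sits in the last row (PyTorch3D row-vector convention)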
Example #11
def shape_assembly_visualization_function(part1, part2, gt_transform, encoder_decoder, subsample, scene_per_batch):

        samples = part2["sdf_samples"]

        samples.requires_grad = False

        sdf_data = (samples.to(device)).reshape(
            subsample * scene_per_batch, 5
        )
        
        xyzs = sdf_data[:, 0:3]
        sdf_gt_part1 = sdf_data[:, 3].unsqueeze(1)
        sdf_gt_part2 = sdf_data[:, 4].unsqueeze(1)
        
        part1_transform_vec = torch.cat((part1["center"], part1["quaternion"]), 1).to(device)

        _, _, predicted_translation, predicted_rotation = encoder_decoder(
                                                part1["surface_points"].to(device), 
                                                part2["surface_points"].to(device), 
                                                xyzs,
                                                part1_transform_vec
                                            )

        mesh1 = o3d.io.read_triangle_mesh(part1["mesh_filename"][0])
        mesh2 = o3d.io.read_triangle_mesh(part2["mesh_filename"][0])

        mesh1.compute_vertex_normals()
        mesh2.compute_vertex_normals()

        mesh1.paint_uniform_color([1, 0.706, 0])
        mesh2.paint_uniform_color([0.706, 1, 0])

        o3d.visualization.draw_geometries([mesh1, mesh2])

        new_mesh = copy.deepcopy(mesh1)
        gt_transform = gt_transform[0].numpy()
        new_mesh.transform(gt_transform)
        new_mesh.rotate(gt_transform[0:3,0:3].T)
        # for i in range(len(new_mesh.vertices)):
        #     new_mesh.vertices[i] = new_mesh.vertices[i] - np.dot(gt_transform[0:3,0:3].T, gt_transform[0:3,3])
        #     new_mesh.vertices[i] = np.dot(gt_transform[0:3,0:3].T, new_mesh.vertices[i])
        
        new_mesh.compute_vertex_normals()

        o3d.visualization.draw_geometries([new_mesh, mesh2])

        predicted_rotation = quaternion_to_matrix(predicted_rotation)

        predicted_rotation = predicted_rotation.detach()
        predicted_translation = predicted_translation.detach()

        transformation = np.zeros((4,4))
        transformation[3,3] = 1

        for point_ind in range(subsample):
            
            transformation[:3,:3] = predicted_rotation[point_ind].numpy()
            transformation[:3,3] = predicted_translation[point_ind].numpy()

            # Rotate mesh1 to be aligned with mesh2
            new_mesh = copy.deepcopy(mesh1)

            new_mesh = new_mesh.transform(transformation)
            o3d.visualization.draw_geometries([new_mesh, mesh2])

        return
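
The per-point loop above assembles a homogeneous 4x4 matrix from a predicted rotation and translation; a compact sketch of that assembly for a single pose (quaternion and translation values are placeholders):

import numpy as np
import torch
from pytorch3d.transforms import quaternion_to_matrix

q = torch.tensor([1.0, 0.0, 0.0, 0.0])  # (w, x, y, z), identity rotation here
t = torch.tensor([0.1, 0.0, -0.2])      # placeholder translation

T = np.eye(4)
T[:3, :3] = quaternion_to_matrix(q).numpy()
T[:3, 3] = t.numpy()
# T can be passed straight to open3d's TriangleMesh.transform(T).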