def test_batch_support_check(self, device, dtype):
    """Batched and single-pair calls must agree on the shared first view pair."""
    P1 = torch.tensor(
        [
            [
                [9.4692e02, -9.6658e02, 6.0862e02, -2.3076e05],
                [-2.1829e02, 5.4163e02, 1.3445e03, -6.4387e05],
                [-6.0675e-01, -6.9807e-01, 3.8021e-01, 3.8896e02],
            ]
        ],
        device=device,
        dtype=dtype,
    )
    # The batched first view is the same projection duplicated twice.
    P1_batch = P1.repeat(2, 1, 1)
    P2 = torch.tensor(
        [
            [
                [1.1518e03, -7.5822e02, 5.4764e02, -1.9764e05],
                [-2.1548e02, 5.3102e02, 1.3492e03, -6.4731e05],
                [-4.3727e-01, -7.8632e-01, 4.3646e-01, 3.4515e02],
            ]
        ],
        device=device,
        dtype=dtype,
    )
    # Second element of the batch uses a different second view.
    P2_other = torch.tensor(
        [
            [
                [9.9595e02, -8.6464e02, 6.7959e02, -2.7517e05],
                [-8.1716e01, 7.7826e02, 1.2395e03, -5.8137e05],
                [-5.7090e-01, -6.0416e-01, 5.5594e-01, 2.8111e02],
            ]
        ],
        device=device,
        dtype=dtype,
    )
    P2_batch = torch.cat([P2, P2_other], dim=0)
    F_batch = epi.fundamental_from_projections(P1_batch, P2_batch)
    F_single = epi.fundamental_from_projections(P1, P2)
    # First batch element must be bitwise identical to the unbatched result.
    assert torch.equal(F_batch[0], F_single[0])
def test_from_to_projections(self, device, dtype):
    """Round trip F -> projections -> F recovers the fundamental matrix up to scale."""
    P1 = torch.tensor(
        [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0]]], device=device, dtype=dtype
    )
    P2 = torch.tensor(
        [[[1.0, 1.0, 1.0, 3.0], [0.0, 2.0, 0.0, 3.0], [0.0, 1.0, 1.0, 0.0]]], device=device, dtype=dtype
    )
    F_mat = epi.fundamental_from_projections(P1, P2)
    P_pair = epi.projections_from_fundamental(F_mat)
    F_hat = epi.fundamental_from_projections(P_pair[..., 0], P_pair[..., 1])
    # A fundamental matrix is only defined up to scale, so compare normalized forms.
    F_mat_norm = epi.normalize_transformation(F_mat)
    F_hat_norm = epi.normalize_transformation(F_hat)
    assert_close(F_mat_norm, F_hat_norm, atol=1e-4, rtol=1e-4)
def generate_two_view_random_scene(
    device: torch.device = torch.device("cpu"), dtype: torch.dtype = torch.float32
) -> Dict[str, torch.Tensor]:
    """Generate a random two-view scene and return its geometry as a dict.

    Returns a dict with intrinsics (``K1``/``K2``, identical), per-view
    rotations/translations (``R1``/``R2``, ``t1``/``t2``), projection matrices
    (``P1``/``P2``), the normalized fundamental matrix ``F``, 3D points ``X``
    and their 2D projections ``x1``/``x2``.
    """
    num_views: int = 2
    num_points: int = 30
    scene: Dict[str, torch.Tensor] = epi.generate_scene(num_views, num_points)

    def view(key: str, idx: int) -> torch.Tensor:
        # Slice out one view while keeping a leading batch dimension of 1.
        return scene[key][idx : idx + 1].to(device, dtype)

    # internal parameters (both views share the same K)
    K1 = scene['K'].to(device, dtype)
    K2 = K1.clone()
    # rotations and translations per view
    R1, R2 = view('R', 0), view('R', 1)
    t1, t2 = view('t', 0), view('t', 1)
    # projection matrices, P = K(R|t)
    P1, P2 = view('P', 0), view('P', 1)
    # fundamental matrix from the 3x4 projections, normalized for comparability
    F_mat = epi.normalize_transformation(epi.fundamental_from_projections(P1[..., :3, :], P2[..., :3, :]))
    # 3d points and their projections in each view
    X = scene['points3d'].to(device, dtype)
    x1, x2 = view('points2d', 0), view('points2d', 1)
    return dict(K1=K1, K2=K2, R1=R1, R2=R2, t1=t1, t2=t2, P1=P1, P2=P2, F=F_mat, X=X, x1=x1, x2=x2)
def test_shape_large(self, device, dtype):
    """Extra leading batch dimensions are preserved in the output shape."""
    lead = (1, 2)
    P1 = torch.rand(*lead, 3, 4, device=device, dtype=dtype)
    P2 = torch.rand(*lead, 3, 4, device=device, dtype=dtype)
    F_mat = epi.fundamental_from_projections(P1, P2)
    assert F_mat.shape == lead + (3, 3)
def test_shape(self, batch_size, device, dtype):
    """One 3x3 fundamental matrix is produced per batch element."""
    num: int = batch_size
    P1, P2 = (torch.rand(num, 3, 4, device=device, dtype=dtype) for _ in range(2))
    F_mat = epi.fundamental_from_projections(P1, P2)
    assert F_mat.shape == (num, 3, 3)