Example #1
    # Requires (module level): import torch; import kornia;
    # and e.g. from torch.testing import assert_allclose
    def test_rotation_inverse(self, device, dtype):
        h, w = 4, 4
        img_b = torch.rand(1, 1, h, w, device=device, dtype=dtype)

        # create a 90-degree (counter-clockwise) rotation matrix about the image center
        center = torch.tensor([[w - 1, h - 1]], device=device, dtype=dtype) / 2
        scale = torch.ones((1, 2), device=device, dtype=dtype)
        angle = 90. * torch.ones(1, device=device, dtype=dtype)
        aff_ab = kornia.get_rotation_matrix2d(center, angle, scale)
        # Same as opencv: cv2.getRotationMatrix2D(((w-1)/2,(h-1)/2), 90., 1.)

        # warp the tensor
        # Same as opencv: cv2.warpAffine(kornia.tensor_to_image(img_b), aff_ab[0].numpy(), (w, h))
        img_a = kornia.warp_affine(img_b, aff_ab, (h, w))

        # invert the transform via the 3x3 homography form (a 2x3 matrix
        # is not invertible), then drop the last row again
        aff_ba = kornia.convert_affinematrix_to_homography(aff_ab).inverse()[..., :2, :]
        img_b_hat = kornia.warp_affine(img_a, aff_ba, (h, w))
        assert_allclose(img_b_hat, img_b, atol=1e-3, rtol=1e-3)
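
What makes the round trip work is the shape change: a 2x3 affine matrix is not invertible, so convert_affinematrix_to_homography first pads it with the homogeneous row [0, 0, 1] into a square 3x3 matrix. A minimal standalone sketch (the matrix values are hand-written for illustration, not taken from the test):

    import torch
    import kornia

    aff = torch.tensor([[[0., -1., 3.], [1., 0., 0.]]])   # (B, 2, 3): 90-deg rotation + shift
    hom = kornia.convert_affinematrix_to_homography(aff)  # (B, 3, 3), last row is [0, 0, 1]
    aff_inv = hom.inverse()[..., :2, :]                   # back to (B, 2, 3) for warp_affine
    print(hom[0, -1])                                     # tensor([0., 0., 1.])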
Example #2
    parser.add_argument("--patience",
                        type=int,
                        default=5,
                        help="training patience")

    args = parser.parse_args()
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # load map points (MP), keyframes (KF) and feature matches from disk
    MPs = torch.from_numpy(np.loadtxt("data/MP.txt")).cuda()
    KFs = torch.from_numpy(np.loadtxt("data/KF.txt")).cuda()
    Matches = torch.from_numpy(np.loadtxt("data/Match.txt")).cuda()

    # pinhole intrinsics as a 2x3 affine block, padded below into a 3x3 camera matrix K
    fx, fy, cx, cy = 320, 320, 320, 240
    affine = torch.FloatTensor([[[fx, 0, cx], [0, fy, cy]]])
    K = kn.convert_affinematrix_to_homography(affine).cuda()

    net = BAGDnet(MPs, KFs, K)

    # sqrt(5.99) is a common robust-loss threshold: the chi-square 95% quantile for 2 DoF
    SmoothLoss = nn.SmoothL1Loss(beta=math.sqrt(5.99))
    optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0)
    scheduler = EarlyStopScheduler(optimizer,
                                   factor=args.factor,
                                   verbose=True,
                                   min_lr=args.min_lr,
                                   patience=args.patience)

    pixel = Matches[:, 2:4]                          # observed 2D pixel coordinates
    frame_id = Matches[:, 0, None].type(torch.int)   # keyframe index per match
    point_id = Matches[:, 1, None].type(torch.int)   # map-point index per match
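
In this example the same conversion serves a different purpose: it turns the 2x3 intrinsics block into a full 3x3 pinhole camera matrix K. A quick standalone check, with the values copied from the snippet above:

    import torch
    import kornia as kn

    affine = torch.FloatTensor([[[320., 0., 320.], [0., 320., 240.]]])
    K = kn.convert_affinematrix_to_homography(affine)
    print(K[0])
    # tensor([[320.,   0., 320.],
    #         [  0., 320., 240.],
    #         [  0.,   0.,   1.]])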