def test_affine_grid_2d(self):
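        # Assumed imports for this snippet (not shown in the source):
        #   numpy as np, torch, torch.nn.functional as F, jittor as jt,
        #   and the affine_grid / grid_sample implementations under test.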
        N = 8
        C = 3
        H = 256
        W = 128
        theta = np.random.randn(N, 2, 3).astype(np.float32)
        features = np.random.randint(256, size=(N, C, H, W)).astype(np.float32)

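        # PyTorch reference: build a sampling grid from theta, then bilinearly
        # sample the features with zero padding outside the grid.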
        torch_theta = torch.Tensor(theta)
        torch_features = torch.Tensor(features)
        torch_grid = F.affine_grid(torch_theta,
                                   size=(N, C, H, W),
                                   align_corners=False)
        torch_sample = F.grid_sample(torch_features,
                                     torch_grid,
                                     mode='bilinear',
                                     padding_mode='zeros',
                                     align_corners=False)

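        # Same pipeline with the Jittor implementation, using identical arguments.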
        jt_theta = jt.array(theta)
        jt_features = jt.array(features)
        jt_grid = affine_grid(jt_theta, size=(N, C, H, W), align_corners=False)
        jt_sample = grid_sample(jt_features,
                                jt_grid,
                                mode='bilinear',
                                padding_mode='zeros',
                                align_corners=False)

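        # Inputs should round-trip exactly and the grids should agree to ~1e-5;
        # the sampled output uses a looser tolerance because small coordinate
        # differences are amplified by pixel intensities up to 255.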
        assert np.allclose(jt_theta.numpy(), torch_theta.numpy())
        assert np.allclose(jt_features.numpy(), torch_features.numpy())
        assert np.allclose(jt_grid.numpy(), torch_grid.numpy(), atol=1e-05)
        assert np.allclose(torch_sample.numpy(), jt_sample.numpy(), atol=1e-01)
Example #2
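# Assumed imports for this snippet (not shown in the source):
#   numpy as np, jittor as jt, and the jittor nn module as nn.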
def affine_align_gpu(features, idxs, align_size, Hs):
    def _transform_matrix(Hs, w, h):
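        # Convert each homography H, given in pixel coordinates, into the
        # normalized [-1, 1] coordinate system expected by affine_grid, and
        # invert it so the grid maps output locations back to input sampling
        # locations (as grid_sample expects).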
        _Hs = np.zeros(Hs.shape, dtype=np.float32)
        for i, H in enumerate(Hs):
            try:
                H0 = np.concatenate((H, np.array([[0, 0, 1]])), axis=0)
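                # A: pixel -> normalized [-1, 1] coords; A_inv: normalized -> pixel coords.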
                A = np.array([[2.0 / w, 0, -1], [0, 2.0 / h, -1], [0, 0, 1]])
                A_inv = np.array([[w / 2.0, 0, w / 2.0], [0, h / 2.0, h / 2.0],
                                  [0, 0, 1]])
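                # Conjugate H into normalized coordinates, invert, and keep the 2x3 affine part.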
                H0 = A.dot(H0).dot(A_inv)
                H0 = np.linalg.inv(H0)
                _Hs[i] = H0[:-1]
            except Exception:
                print('[error in affine_align_gpu]', H)
        return _Hs

    bz, C_feat, H_feat, W_feat = features.size()
    N = len(idxs)
    # Select the per-index feature maps: (N, C_feat, H_feat, W_feat)
    feature_select = features[idxs]
    # transform coordinate system
    Hs_new = _transform_matrix(Hs[:, 0:2, :], w=W_feat,
                               h=H_feat)  # return (N, 2, 3)
    Hs_var = jt.array(Hs_new)
    Hs_var.stop_grad()
    ## theta: input batch of affine matrices (N x 2 x 3)
    ## size: the target output image size (N x C x H x W)
    ## output: sampling grid of size (N x H x W x 2)
    flow = nn.affine_grid(theta=Hs_var,
                          size=(N, C_feat, H_feat, W_feat),
                          align_corners=True).float()
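    # Crop the sampling grid to the requested output size (align_size[0] x align_size[1]).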
    flow = flow[:, :align_size[0], :align_size[1], :]
    ## input: input batch of images (N x C x IH x IW)
    ## grid: flow field of size (N x OH x OW x 2)
    ## padding_mode: padding mode for outside grid values, 'zeros' or 'border' (default: 'zeros')
    rois = nn.grid_sample(feature_select,
                          flow,
                          mode='bilinear',
                          padding_mode='zeros',
                          align_corners=True)  # 'zeros' | 'border'
    return rois