Example 1
def test_mi_gradient_dense():
    # Test the gradient of mutual information
    h = 1e-5
    for ttype in factors:
        transform = regtransforms[ttype]
        dim = ttype[1]
        if dim == 2:
            nslices = 1
            warp_method = vf.warp_2d_affine
        else:
            nslices = 45
            warp_method = vf.warp_3d_affine
        # Get data (a pair of images related to each other by a known transform)
        factor = factors[ttype]
        static, moving, static_g2w, moving_g2w, smask, mmask, M = \
            setup_random_transform(transform, factor, nslices, 5.0)
        smask = None
        mmask = None

        # Prepare a MattesBase instance
        # The computation of the metric is done in 3 steps:
        # 1. Compute the joint distribution
        # 2. Compute the gradient of the joint distribution
        # 3. Compute the metric's value and gradient using results from 1 and 2
        metric = MattesBase(32)
        metric.setup(static, moving, smask, mmask)

        # 1. Update the joint distribution
        metric.update_pdfs_dense(static.astype(np.float64),
                                 moving.astype(np.float64))

        # 2. Update the joint distribution gradient (the derivative of each
        # histogram cell w.r.t. the transform parameters). This requires,
        # among other things, the spatial gradient of the moving image.
        theta = transform.get_identity_parameters().copy()
        grid_to_space = np.eye(dim + 1)
        spacing = np.ones(dim, dtype=np.float64)
        shape = np.array(static.shape, dtype=np.int32)
        mgrad, inside = vf.gradient(moving.astype(np.float32), moving_g2w,
                                    spacing, shape, grid_to_space)
        metric.update_gradient_dense(theta, transform,
                                     static.astype(np.float64),
                                     moving.astype(np.float64),
                                     grid_to_space, mgrad, smask, mmask)

        # 3. Update the metric (in this case, the Mutual Information) and its
        # gradient, which is computed from the joint density and its gradient
        metric.update_mi_metric(update_gradient=True)

        # Now we can extract the value and gradient of the metric
        # This is the gradient according to the implementation under test
        val0 = metric.metric_val
        actual = np.copy(metric.metric_grad)

        # Compute the gradient using finite differences
        n = transform.get_number_of_parameters()
        expected = np.empty_like(actual)
        for i in range(n):
            dtheta = theta.copy()
            dtheta[i] += h

            M = transform.param_to_matrix(dtheta)
            shape = np.array(static.shape, dtype=np.int32)
            warped = np.array(warp_method(moving.astype(np.float32), shape, M))
            metric.update_pdfs_dense(static.astype(np.float64),
                                     warped.astype(np.float64))
            metric.update_mi_metric(update_gradient=False)
            val1 = metric.metric_val
            expected[i] = (val1 - val0) / h

        dp = expected.dot(actual)
        enorm = np.linalg.norm(expected)
        anorm = np.linalg.norm(actual)
        nprod = dp / (enorm * anorm)
        assert nprod >= 0.999
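The finite-difference check used above can be illustrated in isolation. The following is a minimal sketch assuming only NumPy: the made-up objective `f` stands in for the mutual information value and `grad_f` for `metric.metric_grad`; none of these names come from the test itself.

import numpy as np

def f(theta):
    # Hypothetical scalar objective playing the role of the MI metric value.
    return 0.5 * np.dot(theta, theta) + np.sin(theta[0])

def grad_f(theta):
    # Analytical gradient of f, playing the role of metric.metric_grad.
    g = theta.copy()
    g[0] += np.cos(theta[0])
    return g

h = 1e-5
theta = np.array([0.3, -0.7, 1.1])
val0 = f(theta)
actual = grad_f(theta)

# Forward finite differences, one parameter at a time (same loop as above).
expected = np.empty_like(actual)
for i in range(theta.size):
    dtheta = theta.copy()
    dtheta[i] += h
    expected[i] = (f(dtheta) - val0) / h

# Compare directions with the normalized dot product (cosine of the angle).
nprod = expected.dot(actual) / (np.linalg.norm(expected) * np.linalg.norm(actual))
assert nprod >= 0.999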
Example 2
def test_joint_pdf_gradients_dense():
    # Compare the analytical and numerical (finite differences) gradients of
    # the joint distribution (i.e. the derivatives of each histogram cell)
    # w.r.t. the transform parameters. Since the histograms are discrete
    # partitions of the image intensities, the finite difference approximation
    # is normally not very close to the analytical derivatives. Other sources
    # of error are the interpolation used when warping the images and the
    # boundary intensities introduced when interpolating outside of the image
    # (i.e. some "zeros" are introduced at the boundary, which affect the
    # numerical derivatives but are not taken into account by the analytical
    # derivatives). Thus, we need to relax the verification. Instead of
    # requiring the analytical and numerical gradients to be very close to
    # each other, we verify that they approximately point in the same
    # direction by testing whether the angle they form is close to zero.
    h = 1e-4
    for ttype in factors:
        dim = ttype[1]
        if dim == 2:
            nslices = 1
            warp_method = vf.warp_2d_affine
        else:
            nslices = 45
            warp_method = vf.warp_3d_affine

        transform = regtransforms[ttype]
        factor = factors[ttype]
        theta = transform.get_identity_parameters()

        static, moving, static_g2w, moving_g2w, smask, mmask, M = \
            setup_random_transform(transform, factor, nslices, 5.0)
        metric = MattesBase(32)
        metric.setup(static, moving, smask, mmask)

        # Compute the gradient at theta with the implementation under test
        M = transform.param_to_matrix(theta)
        shape = np.array(static.shape, dtype=np.int32)

        warped = warp_method(moving.astype(np.float32), shape, M)
        warped = np.array(warped)
        metric.update_pdfs_dense(static.astype(np.float64),
                                 warped.astype(np.float64))
        # Get the joint distribution evaluated at theta
        J0 = np.copy(metric.joint)
        grid_to_space = np.eye(dim + 1)
        spacing = np.ones(dim, dtype=np.float64)
        mgrad, inside = vf.gradient(moving.astype(np.float32), moving_g2w,
                                    spacing, shape, grid_to_space)
        id = transform.get_identity_parameters()
        metric.update_gradient_dense(id, transform, static.astype(np.float64),
                                     warped.astype(np.float64), grid_to_space,
                                     mgrad, smask, mmask)
        actual = np.copy(metric.joint_grad)
        # Now we have the gradient of the joint distribution w.r.t. the
        # transform parameters

        # Compute the gradient using finite differences
        n = transform.get_number_of_parameters()
        expected = np.empty_like(actual)
        for i in range(n):
            dtheta = theta.copy()
            dtheta[i] += h
            # Update the joint distribution with the warped moving image
            M = transform.param_to_matrix(dtheta)
            shape = np.array(static.shape, dtype=np.int32)
            warped = warp_method(moving.astype(np.float32), shape, M)
            warped = np.array(warped)
            metric.update_pdfs_dense(static.astype(np.float64),
                                     warped.astype(np.float64))
            J1 = np.copy(metric.joint)
            expected[..., i] = (J1 - J0) / h

        # Dot product and norms of gradients of each joint histogram cell
        # i.e. the derivatives of each cell w.r.t. all parameters
        P = (expected * actual).sum(2)
        enorms = np.sqrt((expected ** 2).sum(2))
        anorms = np.sqrt((actual ** 2).sum(2))
        prodnorms = enorms * anorms
        # Cosine of angle between the expected and actual gradients.
        # Exclude very small gradients
        P[prodnorms > 1e-6] /= (prodnorms[prodnorms > 1e-6])
        P[prodnorms <= 1e-6] = 0
        # Verify that a large proportion of the gradients point almost in
        # the same direction. Disregard very small gradients
        mean_cosine = P[P != 0].mean()
        std_cosine = P[P != 0].std()
        assert mean_cosine > 0.9
        assert std_cosine < 0.25
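The per-cell cosine verification at the end of this example can likewise be sketched on synthetic data. This is only an illustration assuming NumPy; `rng`, `nbins`, and `nparams` are made-up names, and the arrays do not come from the test above.

import numpy as np

rng = np.random.default_rng(0)
nbins, nparams = 32, 6

# Pretend `actual` is the analytical gradient of each joint histogram cell
# w.r.t. the transform parameters, and `expected` a noisy finite-difference
# estimate of the same quantity.
actual = rng.normal(size=(nbins, nbins, nparams))
expected = actual + 0.05 * rng.normal(size=actual.shape)

# Per-cell dot products and norms over the parameter axis.
P = (expected * actual).sum(2)
prodnorms = np.sqrt((expected ** 2).sum(2)) * np.sqrt((actual ** 2).sum(2))

# Convert to cosines, discarding cells whose gradients are nearly zero.
valid = prodnorms > 1e-6
P[valid] /= prodnorms[valid]
P[~valid] = 0

# Most cells should point in nearly the same direction.
assert P[P != 0].mean() > 0.9
assert P[P != 0].std() < 0.25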