Example #1
def test_transformation_identity():
    # An identity affine transformation must reproduce the input data exactly.
    data_shape = np.random.randint(1, 77, 3, dtype=int)
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = data_shape[1:]
    m = image_transformation_matrix(source_shape=source_shape,
                                    target_shape=target_shape,
                                    affine_transformation=lambda x: x)
    assert np.allclose(data, apply_matrix(data, m, target_shape))
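These examples omit their imports and fixtures; they exercise image_transformation_matrix and apply_matrix. As a rough, self-contained sketch of the mechanism (an assumption about the internals, written in plain numpy): the transformation is encoded as a matrix of shape (source_pixels, target_pixels), and applying it is a matrix product over frames flattened along the signal dimensions. For the identity transformation this reproduces the data, which is what the test above asserts. apply_matrix_sketch below is a hypothetical stand-in, not the library function.

import numpy as np

# Plain-numpy sketch of the assumed mechanism behind apply_matrix:
# flatten the signal dimensions, multiply by the transformation matrix,
# and reshape to the target signal shape.
def apply_matrix_sketch(data, m, target_shape):
    nav = data.shape[0]
    flat = data.reshape(nav, -1)              # (nav, source_pixels)
    return (flat @ m).reshape((nav,) + tuple(target_shape))

data = np.random.random((4, 6, 6))
identity = np.eye(6 * 6)                      # identity transformation matrix
assert np.allclose(data, apply_matrix_sketch(data, identity, (6, 6)))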
Example #2
def test_transformation_transpose():
    # Flipping the (y, x) coordinate order swaps the two signal axes,
    # i.e. transposes each frame.
    data_shape = np.random.randint(1, 77, 3, dtype=int)
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = tuple(reversed(data_shape[1:]))
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=lambda x: np.flip(x, axis=-1))
    assert np.allclose(np.swapaxes(data, 1, 2),
                       apply_matrix(data, m, target_shape))
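The affine transformation here only flips the coordinate order from (y, x) to (x, y), so each frame is read back transposed. A plain-numpy illustration of that coordinate picture, independent of the library:

import numpy as np

# Gathering a frame at flipped coordinates of the reversed-shape target grid
# is exactly a transpose of that frame.
frame = np.random.random((3, 5))
ty, tx = np.mgrid[0:5, 0:3]        # target grid with reversed shape (5, 3)
gathered = frame[tx, ty]           # look up the source at flipped coordinates
assert np.allclose(gathered, frame.T)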
Example #3
def test_fftshift_matrix():
    # Pre-shifting the coordinates with fftshift_coords must match
    # np.fft.fftshift applied to the data itself.
    data_shape = np.random.randint(1, 77, 3, dtype=int)
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = data_shape[1:]
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=lambda x: x,
        pre_transform=fftshift_coords(target_shape))
    assert np.allclose(np.fft.fftshift(data, axes=(1, 2)),
                       apply_matrix(data, m, target_shape))
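fftshift_coords presumably maps each target coordinate to its fftshift-ed source coordinate; the test checks that this reproduces np.fft.fftshift of the data. The same coordinate picture can be checked with plain numpy:

import numpy as np

# Gathering a frame through shifted row/column indices reproduces
# np.fft.fftshift of that frame (fftshift is a circular shift by n // 2).
h, w = 5, 8
frame = np.random.random((h, w))
sy = (np.arange(h) - h // 2) % h   # shifted row lookup
sx = (np.arange(w) - w // 2) % w   # shifted column lookup
assert np.allclose(frame[np.ix_(sy, sx)], np.fft.fftshift(frame))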
Example #4
def test_difftodect_com_flip_rot_scale(dim):
    # `dim` is supplied by pytest (parametrization or fixture) in the
    # original test module.
    lt_ctx = lt.Context(InlineJobExecutor())
    data_shape = (2, 2, dim, dim)
    data = np.zeros(data_shape)
    data[0, 0, 7, 7] = 1
    data[0, 1, 7, 8] = 1
    data[1, 1, 8, 8] = 1
    data[1, 0, 8, 7] = 1
    source_shape = data_shape[2:]
    target_shape = data_shape[2:]

    f = diffraction_to_detector(
        lamb=1,
        diffraction_shape=target_shape,
        pixel_size_real=1,
        pixel_size_detector=1 / np.array(target_shape) * 4,
        cy=source_shape[0] / 2,
        cx=source_shape[1] / 2,
        flip_y=True,
        scan_rotation=-90.,
    )
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=f,
    )
    transformed_data = apply_matrix(data, m, target_shape)
    ds = lt_ctx.load('memory', data=data, sig_dims=2)
    transformed_ds = lt_ctx.load('memory', data=transformed_data, sig_dims=2)
    com_a = lt_ctx.create_com_analysis(dataset=ds,
                                       mask_radius=np.inf,
                                       flip_y=True,
                                       scan_rotation=-90.,
                                       cy=target_shape[0] / 2,
                                       cx=target_shape[1] / 2)

    com_res = lt_ctx.run(com_a)

    trans_com_a = lt_ctx.create_com_analysis(dataset=transformed_ds,
                                             mask_radius=np.inf,
                                             flip_y=False,
                                             scan_rotation=0.,
                                             cy=target_shape[0] / 2,
                                             cx=target_shape[1] / 2)
    trans_com_res = lt_ctx.run(trans_com_a)
    print(com_res.field.raw_data)
    print(trans_com_res.field.raw_data)

    assert np.allclose(com_res.field.raw_data,
                       np.array(trans_com_res.field.raw_data) / 4)
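For orientation, the center-of-mass quantity the analysis computes can be written down directly with numpy. This is a generic CoM of a single frame, not the LiberTEM analysis, and it ignores the flip, rotation and scaling handled above:

import numpy as np

# Intensity-weighted center of mass of one frame; a single bright pixel
# at (7, 8) yields exactly that coordinate.
frame = np.zeros((16, 16))
frame[7, 8] = 1.0
yy, xx = np.mgrid[0:16, 0:16]
total = frame.sum()
com_y = (yy * frame).sum() / total
com_x = (xx * frame).sum() / total
assert np.allclose([com_y, com_x], [7.0, 8.0])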
Example #5
def test_transformation_scale():
    scale = np.random.randint(1, 7)
    data_shape = np.random.randint(1, 17, 3, dtype=int) * scale
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = tuple(np.array(source_shape) // scale)
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=lambda x: x * scale,
    )
    print(data_shape, source_shape, target_shape)
    res = apply_matrix(data, m, target_shape)
    # Binning, not accumulating intensity but keeping same absolute values
    ref = data.reshape((data.shape[0], target_shape[0], scale, target_shape[1],
                        scale)).mean(axis=(2, 4))
    assert np.allclose(ref, res)
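The reference in this test is plain block binning by averaging: reshape each frame into scale x scale blocks and take the mean, which keeps the same absolute intensity values instead of accumulating them. A concrete miniature example:

import numpy as np

# 4x4 frame binned by a factor of 2: each output pixel is the mean of a
# 2x2 block, e.g. the top-left block (0, 1, 4, 5) averages to 2.5.
scale = 2
frame = np.arange(16, dtype=float).reshape(1, 4, 4)   # one 4x4 frame
binned = frame.reshape(1, 2, scale, 2, scale).mean(axis=(2, 4))
assert binned[0, 0, 0] == 2.5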
Example #6
def test_transformation_rot90():
    data_shape = np.random.randint(1, 77, 3, dtype=int)
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = tuple(reversed(data_shape[1:]))
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=lambda x: x @ rotate_deg(90),
        pre_transform=lambda x: x - np.array(target_shape) / 2 + 0.5,
        post_transform=lambda x: np.round(x + np.array(source_shape) / 2 - 0.5
                                          ).astype(int))
    res = apply_matrix(data, m, target_shape)
    # positive rotations are clockwise, upper right corner
    # is now lower right corner
    assert np.allclose(data[:, 0, -1], res[:, -1, -1])
    # alternative rotation: transpose and flip
    assert np.allclose(np.flip(np.transpose(data, axes=(0, 2, 1)), axis=2),
                       res)
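The second assertion uses the standard identity that a 90 degree clockwise rotation equals a transpose followed by a flip of the last axis; this can be checked with numpy alone:

import numpy as np

# Transpose-then-flip equals numpy's clockwise rotation (np.rot90 with k=-1).
frame = np.random.random((3, 5))
rot_cw = np.flip(frame.T, axis=1)
assert np.allclose(rot_cw, np.rot90(frame, k=-1))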
Example #7
def test_difftodect_identity():
    # With a centered optical axis, no flip, no rotation and matching pixel
    # sizes, diffraction_to_detector reduces to the identity mapping.
    data_shape = np.random.randint(1, 77, 3, dtype=int)
    data = np.random.random(data_shape)
    source_shape = data_shape[1:]
    target_shape = data_shape[1:]

    f = diffraction_to_detector(
        lamb=1,
        diffraction_shape=target_shape,
        pixel_size_real=1,
        pixel_size_detector=1 / np.array(target_shape),
        cy=source_shape[0] / 2,
        cx=source_shape[1] / 2,
        flip_y=False,
        scan_rotation=0.,
    )
    m = image_transformation_matrix(
        source_shape=source_shape,
        target_shape=target_shape,
        affine_transformation=f,
    )
    assert np.allclose(data, apply_matrix(data, m, target_shape))