Code example #1
def test_mask_caching_2():
    input_masks = [
        lambda: np.ones((128, 128)),
        lambda: np.zeros((128, 128)),
    ]
    mask_container = MaskContainer(mask_factories=input_masks, dtype="float32")

    shape1 = Shape((16 * 16, 128, 128), sig_dims=2)
    shape2 = Shape((8 * 16, 128, 128), sig_dims=2)
    slice_ = Slice(origin=(0, 0, 0), shape=shape1)
    mask_container.get(slice_)

    key = (mask_container.dtype, False, True, 'numpy')

    cache_info = mask_container._get_masks_for_slice[key].cache_info()
    assert cache_info.hits == 0
    assert cache_info.misses == 1

    mask_container.get(slice_)

    cache_info = mask_container._get_masks_for_slice[key].cache_info()
    assert cache_info.hits == 1
    assert cache_info.misses == 1

    slice_ = Slice(origin=(1, 0, 0), shape=shape2)

    mask_container.get(slice_)

    cache_info = mask_container._get_masks_for_slice[key].cache_info()
    assert cache_info.hits == 2
    assert cache_info.misses == 1
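
The cache_info() assertions in the test above read like functools.lru_cache bookkeeping (per-function hit and miss counters). A minimal sketch of that behaviour, independent of MaskContainer and using only the standard library:

import functools

@functools.lru_cache(maxsize=None)
def square(x):
    return x * x

square(2)                  # first call: a miss, the result is computed and stored
square(2)                  # second call: a hit, the result comes from the cache
info = square.cache_info()
assert (info.hits, info.misses) == (1, 1)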
Code example #2
def _make_mask_container(self):
    p = self.params
    return MaskContainer(p.mask_factories,
                         dtype=p.mask_dtype,
                         use_sparse=p.use_sparse,
                         count=p.mask_count,
                         backend=self.backend)
Code example #3
    def get_task_data(self):
        ""
        match_pattern = self.params.match_pattern
        crop_size = match_pattern.get_crop_size()
        size = (2 * crop_size + 1, 2 * crop_size + 1)
        template = match_pattern.get_mask(sig_shape=size)
        steps = self.params.steps
        peak_offsetY, peak_offsetX = np.mgrid[-steps:steps + 1, -steps:steps + 1]

        offsetY = self.params.peaks[:, 0, np.newaxis, np.newaxis] + peak_offsetY - crop_size
        offsetX = self.params.peaks[:, 1, np.newaxis, np.newaxis] + peak_offsetX - crop_size

        offsetY = offsetY.flatten()
        offsetX = offsetX.flatten()

        stack = functools.partial(
            masks.sparse_template_multi_stack,
            mask_index=range(len(offsetY)),
            offsetX=offsetX,
            offsetY=offsetY,
            template=template,
            imageSizeX=self.meta.dataset_shape.sig[1],
            imageSizeY=self.meta.dataset_shape.sig[0]
        )
        # CSC matrices in combination with transposed data are fastest
        container = MaskContainer(mask_factories=stack, dtype=np.float32,
            use_sparse='scipy.sparse.csc')

        kwargs = {
            'mask_container': container,
            'crop_size': crop_size,
        }
        return kwargs
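
The comment in this example states that CSC matrices in combination with transposed data are fastest. A minimal, self-contained sketch of that access pattern (plain SciPy/NumPy, not project code): the sparse mask stack stays on the left of the product and the tile of frames is transposed so that each frame becomes a column.

import numpy as np
import scipy.sparse as sp

n_masks, sig_size, n_frames = 4, 128 * 128, 32
mask_stack = sp.random(n_masks, sig_size, density=0.01, format='csc', dtype=np.float32)
frames = np.random.random((n_frames, sig_size)).astype(np.float32)

# sparse CSC stack times transposed frame data: one result column per frame
result = mask_stack.dot(frames.T)  # shape: (n_masks, n_frames)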
Code example #4
def masks():
    input_masks = [
        lambda: np.ones((128, 128)),
        lambda: sparse.zeros((128, 128)),
        lambda: np.ones((128, 128)),
        lambda: sp.csr_matrix(
            ((1, ), ((64, ), (64, ))), shape=(128, 128), dtype=np.float32),
        lambda: gradient_x(128, 128, dtype=np.float32),
    ]
    return MaskContainer(mask_factories=input_masks, dtype=np.float32)
Code example #5
File: ssb.py Project: bangunarya/ptychography
    def get_task_data(self):
        masks, filter_center = generate_masks(
            shape=self.meta.dataset_shape,
            dtype=self.params.dtype,
            U=self.params.U,
            dpix=self.params.dpix,
            semiconv=self.params.semiconv,
            semiconv_pix=self.params.semiconv_pix,
            cx=self.params.cx,
            cy=self.params.cy,
        )

        return {
            "masks":
            MaskContainer(mask_factories=lambda: masks,
                          dtype=masks.dtype,
                          use_sparse='scipy.sparse.csc',
                          count=masks.shape[0]),
            "filter_center":
            filter_center
        }
Code example #6
def test_validate_ssb(real_params, real_intensity_ds, real_plane_wave,
                      real_reference_ssb, lt_ctx, method, external_container):
    '''
    The mask generation methods can produce slightly different masks.

    Since SSB strongly suppresses noise, including any features
    where real space and diffraction space don't properly align,
    slight differences in the mask stack can lead to amplified errors
    if the input data contains no actual features and the signal sums to nearly zero.

    For that reason, the correctness of the mask generation functions should be tested on
    simulated data that contains a pronounced signal.

    Furthermore, this allows comparing the reconstruction with a "ground truth" phase.
    '''
    dtype = np.float64

    shape = real_intensity_ds.shape

    # The acceleration voltage U in kV
    U = real_params["U"]
    lamb = wavelength(U)

    # STEM semiconvergence angle in radians
    semiconv = real_params["semiconv"]
    # Diameter of the primary beam in the diffraction pattern in pixels
    semiconv_pix = real_params["semiconv_pix"]

    cy = real_params["cy"]
    cx = real_params["cx"]

    dpix = real_params["dpix"]

    transformation = real_params["transformation"]

    if external_container:
        masks = generate_masks(
            reconstruct_shape=shape[:2],
            mask_shape=shape[2:],
            dtype=dtype,
            lamb=lamb,
            dpix=dpix,
            semiconv=semiconv,
            semiconv_pix=semiconv_pix,
            cy=cy,
            cx=cx,
            transformation=transformation,
            method=method,
            cutoff=1,
        )

        mask_container = MaskContainer(
            mask_factories=lambda: masks,
            dtype=masks.dtype,
            use_sparse='scipy.sparse.csc',
            count=masks.shape[0],
        )
    else:
        mask_container = None

    udf = SSB_UDF(
        lamb=lamb,
        dpix=dpix,
        semiconv=semiconv,
        semiconv_pix=semiconv_pix,
        dtype=dtype,
        cy=cy,
        cx=cx,
        mask_container=mask_container,
        method=method,
        cutoff=1,
    )

    result = lt_ctx.run_udf(udf=udf, dataset=real_intensity_ds)

    result_f, reference_masks = real_reference_ssb

    ssb_res = get_results(result)
    # We apply the amplitude scaling to the raw reference SSB result
    reference_ssb_raw = np.fft.ifft2(result_f)
    reference_ssb_amp = np.abs(reference_ssb_raw)
    reference_ssb_phase = np.angle(reference_ssb_raw)
    reference_ssb_res = np.sqrt(reference_ssb_amp) * np.exp(
        1j * reference_ssb_phase)

    ssb_phase = np.angle(ssb_res)
    ref_phase = np.angle(real_plane_wave)

    ssb_amp = np.abs(ssb_res)
    ref_amp = np.abs(real_plane_wave)

    # The phases are usually shifted by a constant offset
    # Looking at Std removes the offset
    # TODO the current data is at the limit of SSB reconstruction. Better data should be simulated.
    # TODO work towards 100 % correspondence with suitable test dataset
    assert np.std(ssb_phase - ref_phase) < 0.1 * np.std(ssb_phase)

    # Compare reconstructed amplitude
    # We can't use std(amp) since the amplitude is nearly constant over the FOV
    print("Max ref: ", np.max(np.abs(ssb_amp - ref_amp)),
          np.max(np.abs(ref_amp)))
    assert np.max(np.abs(ssb_amp - ref_amp)) < 0.1 * np.max(np.abs(ref_amp))

    # Make sure the methods are at least reasonably comparable
    # TODO work towards 100 % correspondence with suitable test dataset
    # TODO make the amplitude of the reconstruction match
    print("Max between: ", np.max(np.abs(ssb_res - reference_ssb_res)),
          np.max(np.abs(ssb_res)))
    print("Std between: ", np.std(ssb_res - reference_ssb_res),
          np.std(ssb_res))
    assert np.max(
        np.abs(ssb_res - reference_ssb_res)) < 0.01 * np.max(np.abs(ssb_res))
    assert np.std(ssb_res - reference_ssb_res) < 0.01 * np.std(ssb_res)
Code example #7
def test_ssb_container(dpix, lt_ctx, backend):
    try:
        if backend == 'cupy':
            set_use_cuda(0)
        dtype = np.float64

        scaling = 4
        shape = (29, 30, 189 // scaling, 197 // scaling)

        # The acceleration voltage U in kV
        U = 300
        lamb = wavelength(U)

        # STEM semiconvergence angle in radians
        semiconv = 25e-3
        # Diameter of the primary beam in the diffraction pattern in pixels
        semiconv_pix = 78.6649 / scaling

        cy = 93 // scaling
        cx = 97 // scaling

        input_data = (np.random.uniform(0, 1, np.prod(shape)) *
                      np.linspace(1.0, 1000.0, num=np.prod(shape)))
        input_data = input_data.astype(np.float64).reshape(shape)

        masks = generate_masks(reconstruct_shape=shape[:2],
                               mask_shape=shape[2:],
                               dtype=dtype,
                               lamb=lamb,
                               dpix=dpix,
                               semiconv=semiconv,
                               semiconv_pix=semiconv_pix,
                               cy=cy,
                               cx=cx,
                               method='subpix')

        mask_container = MaskContainer(
            mask_factories=lambda: masks,
            dtype=masks.dtype,
            use_sparse='scipy.sparse.csc',
            count=masks.shape[0],
        )

        udf = SSB_UDF(lamb=lamb,
                      dpix=dpix,
                      semiconv=semiconv,
                      semiconv_pix=semiconv_pix,
                      dtype=dtype,
                      cy=cy,
                      cx=cx,
                      mask_container=mask_container)

        dataset = MemoryDataSet(
            data=input_data,
            tileshape=(20, shape[2], shape[3]),
            num_partitions=2,
            sig_dims=2,
        )

        result = lt_ctx.run_udf(udf=udf, dataset=dataset)

        result_f, reference_masks = reference_ssb(input_data,
                                                  U=U,
                                                  dpix=dpix,
                                                  semiconv=semiconv,
                                                  semiconv_pix=semiconv_pix,
                                                  cy=cy,
                                                  cx=cx)

        task_data = udf.get_task_data()

        udf_masks = task_data['masks'].computed_masks

        half_y = shape[0] // 2 + 1
        # Use symmetry and reshape like generate_masks()
        reference_masks = reference_masks[:half_y].reshape(
            (half_y * shape[1], shape[2], shape[3]))

        print(np.max(np.abs(udf_masks.todense() - reference_masks)))

        print(np.max(np.abs(result['pixels'].data - result_f)))

        assert np.allclose(result['pixels'].data, result_f)
    finally:
        if backend == 'cupy':
            set_use_cpu(0)
Code example #8
File: udf.py Project: uellue/ptychography
    def get_task_data(self):
        # shorthand, cupy or numpy
        xp = self.xp

        if self.meta.device_class == 'cpu':
            backend = 'numpy'
        elif self.meta.device_class == 'cuda':
            backend = 'cupy'
        else:
            raise ValueError("Unknown device class")

        # Hack to pass a fixed external container
        # In particular useful for single-process live processing
        # or inline executor
        if self.params.mask_container is None:
            masks = generate_masks(
                reconstruct_shape=self.reconstruct_shape,
                mask_shape=tuple(self.meta.dataset_shape.sig),
                dtype=self.params.dtype,
                lamb=self.params.lamb,
                dpix=self.params.dpix,
                semiconv=self.params.semiconv,
                semiconv_pix=self.params.semiconv_pix,
                cy=self.params.cy,
                cx=self.params.cx,
                transformation=self.params.transformation,
                cutoff=self.params.cutoff,
                method=self.params.method,
            )
            container = MaskContainer(mask_factories=lambda: masks,
                                      dtype=masks.dtype,
                                      use_sparse='scipy.sparse.csr',
                                      count=masks.shape[0],
                                      backend=backend)
        else:
            container = self.params.mask_container
            target_size = (self.reconstruct_shape[0] // 2 +
                           1) * self.reconstruct_shape[1]
            container_shape = container.computed_masks.shape
            expected_shape = (target_size, ) + tuple(
                self.meta.dataset_shape.sig)
            if container_shape != expected_shape:
                raise ValueError(
                    f"External mask container doesn't have the expected shape. "
                    f"Got {container_shape}, expected {expected_shape}. "
                    "Mask count (self.meta.dataset_shape.nav[0] // 2 + 1) "
                    "* self.meta.dataset_shape.nav[1], "
                    "Mask shape self.meta.dataset_shape.sig. "
                    "The methods generate_masks_*() help to generate a suitable mask stack."
                )

        # Precalculated LUT for Fourier transform
        # The y axis is trimmed in half since the full trotter stack is symmetric,
        # i.e. the missing half can be reconstructed from the other results
        row_steps = -2j * np.pi * np.linspace(
            0, 1, self.reconstruct_shape[0], endpoint=False)
        col_steps = -2j * np.pi * np.linspace(
            0, 1, self.reconstruct_shape[1], endpoint=False)

        half_y = self.reconstruct_shape[0] // 2 + 1
        full_x = self.reconstruct_shape[1]

        # This creates a 2D array of row x spatial frequency
        row_exp = np.exp(row_steps[:, np.newaxis] *
                         np.arange(half_y)[np.newaxis, :])
        # This creates a 2D array of col x spatial frequency
        col_exp = np.exp(col_steps[:, np.newaxis] *
                         np.arange(full_x)[np.newaxis, :])

        steps_dtype = np.result_type(np.complex64, self.params.dtype)

        return {
            "masks": container,
            "row_exp": xp.array(row_exp.astype(steps_dtype)),
            "col_exp": xp.array(col_exp.astype(steps_dtype)),
            "backend": backend
        }
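
The row_exp and col_exp tables built above amount to a 2D DFT restricted to the first half_y rows, which is why only half of the y axis needs to be kept. A small verification sketch with toy sizes (plain NumPy, not project code):

import numpy as np

N_y, N_x = 6, 5
half_y = N_y // 2 + 1
data = np.random.random((N_y, N_x))

row_steps = -2j * np.pi * np.linspace(0, 1, N_y, endpoint=False)
col_steps = -2j * np.pi * np.linspace(0, 1, N_x, endpoint=False)
row_exp = np.exp(row_steps[:, np.newaxis] * np.arange(half_y)[np.newaxis, :])
col_exp = np.exp(col_steps[:, np.newaxis] * np.arange(N_x)[np.newaxis, :])

# F[q, p] = sum_{y, x} data[y, x] * row_exp[y, q] * col_exp[x, p]
partial = np.einsum('yx,yq,xp->qp', data, row_exp, col_exp)
assert np.allclose(partial, np.fft.fft2(data)[:half_y])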
Code example #9
File: ssb.py Project: Sniper2k/ptychography
    def get_task_data(self):
        # shorthand, cupy or numpy
        xp = self.xp

        if self.meta.device_class == 'cpu':
            backend = 'numpy'
        elif self.meta.device_class == 'cuda':
            backend = 'cupy'
        else:
            raise ValueError("Unknown device class")

        # Hack to pass a fixed external container
        # In particular useful for single-process live processing
        # or inline executor
        if self.params.mask_container is None:
            masks = generate_masks(
                reconstruct_shape=self.reconstruct_shape,
                mask_shape=tuple(self.meta.dataset_shape.sig),
                dtype=self.params.dtype,
                lamb=wavelength(self.params.U),
                dpix=self.params.dpix,
                semiconv=self.params.semiconv,
                semiconv_pix=self.params.semiconv_pix,
                center=self.params.center,
                transformation=self.params.transformation,
                cutoff=self.params.cutoff,
                method=self.params.method,
            )
            container = MaskContainer(mask_factories=lambda: masks,
                                      dtype=masks.dtype,
                                      use_sparse='scipy.sparse.csc',
                                      count=masks.shape[0],
                                      backend=backend)
        else:
            container = self.params.mask_container
            target_size = (self.reconstruct_shape[0] // 2 +
                           1) * self.reconstruct_shape[1]
            container_shape = container.computed_masks.shape
            expected_shape = (target_size, ) + tuple(
                self.meta.dataset_shape.sig)
            if container_shape != expected_shape:
                raise ValueError(
                    f"External mask container doesn't have the expected shape. "
                    f"Got {container_shape}, expected {expected_shape}. "
                    "Mask count (self.meta.dataset_shape.nav[0] // 2 + 1) "
                    "* self.meta.dataset_shape.nav[1], "
                    "Mask shape self.meta.dataset_shape.sig. "
                    "The methods generate_masks_*() help to generate a suitable mask stack."
                )
        ds_nav = tuple(self.meta.dataset_shape.nav)

        # Precalculated values for Fourier transform
        # The y axis is trimmed in half since the full trotter stack is symmetric,
        # i.e. the missing half can be reconstructed from the other results
        row_steps = -2j * np.pi * np.linspace(
            0, 1, self.reconstruct_shape[0], endpoint=False)
        col_steps = -2j * np.pi * np.linspace(
            0, 1, self.reconstruct_shape[1], endpoint=False)

        half_y = self.reconstruct_shape[0] // 2 + 1
        full_x = self.reconstruct_shape[1]

        row_exp = np.exp(row_steps[:, np.newaxis] *
                         np.arange(half_y)[np.newaxis, :])
        col_exp = np.exp(col_steps[:, np.newaxis] *
                         np.arange(full_x)[np.newaxis, :])

        # Calculate the x and y indices in the navigation dimension
        # for each frame, taking the ROI into account
        y_positions, x_positions = np.mgrid[0:ds_nav[0], 0:ds_nav[1]]

        if self.meta.roi is None:
            y_map = y_positions.flatten()
            x_map = x_positions.flatten()
        else:
            y_map = y_positions[self.meta.roi]
            x_map = x_positions[self.meta.roi]

        steps_dtype = np.result_type(np.complex64, self.params.dtype)

        return {
            "masks": container,
            # Frame positions in the dataset masked by ROI
            # to easily access position in dataset when
            # processing with ROI applied
            "y_map": xp.array(y_map),
            "x_map": xp.array(x_map),
            "row_exp": xp.array(row_exp.astype(steps_dtype)),
            "col_exp": xp.array(col_exp.astype(steps_dtype)),
            "backend": backend
        }
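
The y_map and x_map arrays above map each frame that survives the ROI back to its (y, x) position in the full navigation grid. A small sketch of that indexing with a hypothetical 3 x 4 navigation shape (plain NumPy, not project code):

import numpy as np

nav = (3, 4)
y_positions, x_positions = np.mgrid[0:nav[0], 0:nav[1]]

roi = np.zeros(nav, dtype=bool)
roi[1, 2] = True
roi[2, 0] = True

# boolean indexing returns the selected frames in row-major (scan) order
y_map = y_positions[roi]
x_map = x_positions[roi]
assert [(int(y), int(x)) for y, x in zip(y_map, x_map)] == [(1, 2), (2, 0)]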
Code example #10
File: ssb_lowmem.py Project: uellue/ptychography
    def get_task_data(self):
        # shorthand, cupy or numpy
        xp = self.xp
        # Hack to pass a fixed external container
        # In particular useful for single-process live processing
        # or inline executor
        ds_nav = tuple(self.meta.dataset_shape.nav)
        y_positions, x_positions = np.mgrid[0:ds_nav[0], 0:ds_nav[1]]

        # Precalculated values for Fourier transform
        row_steps = -2j*np.pi*np.linspace(0, 1, self.reconstruct_shape[0], endpoint=False)
        col_steps = -2j*np.pi*np.linspace(0, 1, self.reconstruct_shape[1], endpoint=False)

        if self.meta.roi is None:
            y_map = y_positions.flatten()
            x_map = x_positions.flatten()
        else:
            y_map = y_positions[self.meta.roi]
            x_map = x_positions[self.meta.roi]
        if self.params.filter_center is None:
            cy, cx = self.params.center
            mask_shape = tuple(self.meta.dataset_shape.sig)
            filter_center = circular(
                centerX=cx, centerY=cy,
                imageSizeX=mask_shape[1], imageSizeY=mask_shape[0],
                radius=self.params.semiconv_pix,
                antialiased=True
            ).astype(self.params.dtype)
        else:
            filter_center = self.params.filter_center.astype(self.params.dtype)

        steps_dtype = np.result_type(np.complex64, self.params.dtype)

        masks = generate_masks(
            reconstruct_shape=self.reconstruct_shape,
            mask_shape=tuple(self.meta.dataset_shape.sig),
            dtype=self.params.dtype,
            wavelength=wavelength(self.params.U),
            dpix=self.params.dpix,
            semiconv=self.params.semiconv,
            semiconv_pix=self.params.semiconv_pix,
            center=self.params.center,
            transformation=self.params.transformation,
            cutoff=self.params.cutoff,
            filter_center=filter_center
        )

        skyline = generate_skyline(
            reconstruct_shape=self.reconstruct_shape,
            mask_shape=tuple(self.meta.dataset_shape.sig),
            dtype=self.params.dtype,
            wavelength=wavelength(self.params.U),
            dpix=self.params.dpix,
            semiconv=self.params.semiconv,
            semiconv_pix=self.params.semiconv_pix,
            tiling_scheme=self.meta.tiling_scheme,
            filter_center=filter_center,
            center=self.params.center,
            transformation=self.params.transformation,
            cutoff=self.params.cutoff,
            debug_masks=masks.reshape((
                self.reconstruct_shape[0]//2 + 1,
                self.reconstruct_shape[1],
                *tuple(self.meta.dataset_shape.sig)
            )).todense()
        )
        container = MaskContainer(
            mask_factories=lambda: masks, dtype=masks.dtype,
            use_sparse='scipy.sparse.csc', count=masks.shape[0], backend=self.meta.backend
        )
        return {
            # Frame positions in the dataset masked by ROI
            # to easily access position in dataset when
            # processing with ROI applied
            "skyline": skyline,
            "masks": container,
            "filter_center": xp.array(filter_center),
            "y_map": xp.array(y_map),
            "x_map": xp.array(x_map),
            "row_steps": xp.array(row_steps.astype(steps_dtype)),
            "col_steps": xp.array(col_steps.astype(steps_dtype)),
        }