Example #1
# Module-level imports assumed by this snippet: quantities for the units and the
# rotation axis constants from syris.geometry.
import quantities as q
from syris.geometry import X_AX, Y_AX, Z_AX


def make_projection(shape,
                    ps,
                    axis,
                    mesh,
                    center,
                    lamino_angle,
                    tomo_angle,
                    ss=1):
    from syris.imageprocessing import bin_image
    if axis == 'z':
        lamino_angle = lamino_angle + 90 * q.deg
        tomo_angle = -tomo_angle
    axis = Y_AX if axis == 'y' else Z_AX
    mesh.clear_transformation()
    mesh.translate(center)
    mesh.rotate(lamino_angle, X_AX)
    mesh.rotate(tomo_angle, axis)

    orig_shape = shape
    shape = tuple([n * ss for n in orig_shape])
    ps = ps / ss

    # t=None overrides the trajectory and projects the mesh with the transformation set up above
    projection = mesh.project(shape, ps, t=None)
    if ss > 1:
        projection = bin_image(projection, orig_shape, average=True)

    return projection.get()
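
A minimal driver for this helper could look as follows; this is only a sketch which assumes that syris has been initialized and that mesh is an existing syris Mesh instance, with all numeric values chosen purely for illustration:

# Hypothetical usage, not part of the original script: scan a few tomographic
# angles at a fixed laminographic angle with 2x supersampling.
import numpy as np
import quantities as q

shape = (512, 512)                      # (height, width) in detector pixels
ps = 1 * q.um                           # detector pixel size
center = (256, 256, 256) * q.um         # put the mesh roughly in the middle of the field of view
tomo_angles = np.linspace(0, 180, 10) * q.deg

projections = [
    make_projection(shape, ps, 'y', mesh, center, 30 * q.deg, angle, ss=2)
    for angle in tomo_angles
]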
Example #2
    # Assumes the surrounding camera module's imports: numpy as np, syris.config as cfg,
    # a gutil alias for syris' GPU utilities, fwnm_to_sigma and the bin_image/decimate helpers.
    def get_image(self,
                  photons,
                  shot_noise=True,
                  amplifier_noise=True,
                  psf=True,
                  queue=None):
        """Get digital counts image from incoming *photons*. The resulting image is based on the
        incoming photons and dark current. We apply noise based on EMVA 1288 standard according to
        which the variance :math:`\sigma_y^2 = K^2 ( \sigma_e^2 + \sigma_d^2 ) + \sigma_q^2`, where
        :math:`K` is the system gain, :math:`\sigma_e^2` is the poisson- distributed shot noise
        variance, :math:`\sigma_d^2` is the normal distributed electronics noise variance and
        :math:`\sigma_q^2` is the quantization noise variance. If *shot_noise* is False don't apply
        it. If *amplifier_noise* is False don't apply it as well. If *psf* is False don't apply the
        point spread function.
        """
        if self._last_input_shape != photons.shape:
            self._last_input_shape = photons.shape
            # Integer binning factor between the incoming image and the detector shape
            self._bin_factor = (photons.shape[0] // self.shape[0],
                                photons.shape[1] // self.shape[1])

        if queue is None:
            queue = cfg.OPENCL.queue

        # Dark current contributes to the shot noise below; adjust it for the
        # binning and gain applied later
        dark = float(
            self.dark_current) / self._bin_factor[0] / self._bin_factor[1]
        electrons = dark + gutil.get_host(photons)

        if self._bin_factor != (1, 1):
            if psf:
                sigma = (fwnm_to_sigma(self._bin_factor[0]),
                         fwnm_to_sigma(self._bin_factor[1]))
                small = decimate(electrons,
                                 self.shape,
                                 sigma=sigma,
                                 queue=queue)
            else:
                small = bin_image(electrons, self.shape, queue=queue)
            electrons = gutil.get_host(small)

        if shot_noise:
            electrons = np.random.poisson(electrons)

        if amplifier_noise and self.amplifier_sigma > 0:
            # Add electronics noise
            electrons = np.random.normal(electrons, self.amplifier_sigma)

        counts = self.gain * electrons

        # Cut the values beyond the maximum represented grey value given by
        # bytes per pixel.
        counts[counts > self.max_grey_value] = self.max_grey_value

        # Quantization: casting to the integer output dtype introduces the quantization noise
        return counts.astype(self.dtype)
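
For reference, the EMVA 1288 relation from the docstring can be checked with a standalone NumPy sketch of the same noise chain; the function and parameter names below are illustrative (they are not part of syris), and sigma_q^2 = 1/12 DN^2 is the usual quantization term for a quantization step of 1 DN:

import numpy as np

def expected_output_variance(signal_electrons, dark_electrons, gain, amplifier_sigma):
    # EMVA 1288: sigma_y^2 = K^2 * (sigma_e^2 + sigma_d^2) + sigma_q^2, where the shot
    # noise variance sigma_e^2 equals the mean number of electrons (Poisson statistics),
    # sigma_d is the amplifier noise and sigma_q^2 = 1/12 for a 1 DN quantization step.
    sigma_e_sq = signal_electrons + dark_electrons
    sigma_d_sq = amplifier_sigma ** 2
    sigma_q_sq = 1.0 / 12.0
    return gain ** 2 * (sigma_e_sq + sigma_d_sq) + sigma_q_sq

# Monte Carlo check of the closed form with illustrative numbers
rng = np.random.default_rng(0)
electrons = rng.poisson(1000 + 20, size=200000)    # shot noise on photo- plus dark electrons
electrons = rng.normal(electrons, 5.0)             # normally distributed amplifier noise
counts = np.round(0.5 * electrons)                 # system gain K = 0.5 and quantization
print(counts.var(), expected_output_variance(1000, 20, 0.5, 5.0))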
Example #3
    def test_sum(self):
        m = 8
        n = 16
        image = np.arange(m * n).reshape(m, n).astype(cfg.PRECISION.np_float)
        cl_im = cl_array.to_device(cfg.OPENCL.queue, image)
        sizes = (1, 2, 4)
        for shape in itertools.product(sizes, sizes):
            region = (m // shape[0], n // shape[1])
            # bin_cpu is a NumPy reference implementation from the test module (see the sketch below)
            gt = bin_cpu(image, shape)
            res = ip.bin_image(cl_im, shape)
            np.testing.assert_equal(gt, res)

            # Test averaging
            res = ip.bin_image(cl_im, shape, average=True)
            np.testing.assert_equal(gt / (region[0] * region[1]), res)

        # Not a divisor
        self.assertRaises(RuntimeError, ip.bin_image, cl_im, (4, 10))
        self.assertRaises(RuntimeError, ip.bin_image, cl_im, (5, 8))
        self.assertRaises(RuntimeError, ip.bin_image, cl_im, (4, 7), offset=(2, 2))
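
The bin_cpu reference that the test compares against is not shown on this page; a minimal NumPy sketch of a summing rebin with the behaviour the test relies on could look like this (the name and signature are taken from the call sites above, the body itself is an assumption):

import numpy as np

def bin_cpu(image, shape):
    # Sum-bin image down to shape: every output pixel is the sum of one
    # (rows // shape[0]) x (cols // shape[1]) region of the input.
    m, n = image.shape
    region = (m // shape[0], n // shape[1])
    return image.reshape(shape[0], region[0], shape[1], region[1]).sum(axis=(1, 3))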
Example #5
# Module-level names assumed by this snippet: numpy as np, quantities as q, a module
# logger LOG and a save_image helper from the example's utilities.
def make_ground_truth(args, shape, mesh):
    """Shape is (y, x), so the total number of slices is y."""
    import syris.config as cfg
    from syris.imageprocessing import bin_image

    if args.z_chunk % args.supersampling:
        raise ValueError('z_chunk must be divisible by supersampling')

    queue = cfg.OPENCL.queue
    # Move the mesh to the middle
    ps = args.pixel_size / args.supersampling
    psm = ps.simplified.magnitude
    orig_shape = shape
    shape = tuple([n * args.supersampling for n in shape])
    # Make sure the projections are computed with the same x- and y-offsets
    point = (shape[1] * psm / 2, shape[0] * psm / 2, shape[1] * psm / 2) * q.m
    LOG.info('Mesh shift: {}'.format(point.rescale(q.um)))
    LOG.info('Mesh shift in pixels: {}'.format(
        (point / args.pixel_size).simplified.magnitude))
    mesh.translate(point)
    mesh.transform()
    mesh.sort()

    z_stack = np.empty((args.supersampling, ) + orig_shape,
                       dtype=cfg.PRECISION.np_float)

    for i in range(0, shape[0], args.z_chunk):
        end = min(i + args.z_chunk, shape[0])
        offset = (0, i * ps.rescale(q.um).magnitude, 0) * q.um
        slices = mesh.compute_slices((end - i, ) + shape, ps,
                                     offset=offset).get()
        LOG.info('Computing slices {}-{}'.format(i, end))
        enumerated = list(enumerate(slices))[::args.supersampling]
        for j, sl in enumerated:
            # Z-dimension downsampling
            for k in range(args.supersampling):
                z_stack[k] = bin_image(slices[j + k],
                                       orig_shape,
                                       average=True,
                                       queue=queue).get()
            # Average only the slices which are present (the last run might not go to the end)
            sl = np.mean(z_stack[:slices.shape[0]], axis=0)
            index = (i + j) // args.supersampling
            save_image(args.prefix.format(index), sl)

    return sl
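
A hypothetical invocation, assuming an argparse-style args namespace with exactly the attributes the function reads and an existing syris mesh; all values are illustrative:

import argparse
import quantities as q

args = argparse.Namespace(
    pixel_size=1 * q.um,              # detector pixel size
    supersampling=2,                  # supersampling factor per dimension
    z_chunk=100,                      # slices computed per batch (divisible by supersampling)
    prefix='gt/slice_{:>05}.tif',     # output file name pattern
)
shape = (512, 512)                    # (y, x) shape of one slice
last_slice = make_ground_truth(args, shape, mesh)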