def _varconvolve_2d_parametrized(image, parameters, kernel_name, sampler=None, queue=None,
                                 out=None, block=False):
    """Variable convolution of *image* with *parameters*, use OpenCL kernel *kernel_name*,
    *sampler*, *queue*, *out* and wait if *block* is True. Return *out*.
    """
    if queue is None:
        queue = cfg.OPENCL.queue
    if out is None:
        out = cl.array.Array(queue, image.shape, dtype=cfg.PRECISION.np_float)
    if sampler is None:
        sampler = cl.Sampler(queue.context, False, cl.addressing_mode.CLAMP_TO_EDGE,
                             cl.filter_mode.NEAREST)
    if not isinstance(parameters, cl_array.Array):
        params_host = np.empty(parameters[0].shape, dtype=cfg.PRECISION.vfloat2)
        params_host['y'] = g_util.get_host(parameters[0])
        params_host['x'] = g_util.get_host(parameters[1])
        parameters = cl_array.to_device(queue, params_host)
    if parameters.shape != image.shape:
        raise ValueError("Parameters shape '{}' differs from image shape '{}'".format(
            parameters.shape, image.shape))

    image = g_util.get_image(image, queue=queue)
    args = (image, out.data, sampler, cl_array.vec.make_int2(0, 0), parameters.data)
    varconvolve(kernel_name, image.shape[::-1], args, queue=queue, block=block)

    return out
def get_image(self, photons, shot_noise=True, amplifier_noise=True, psf=True, queue=None):
    r"""Get digital counts image from incoming *photons*. The resulting image is based on the
    incoming photons and dark current. We apply noise based on the EMVA 1288 standard according
    to which the variance :math:`\sigma_y^2 = K^2 ( \sigma_e^2 + \sigma_d^2 ) + \sigma_q^2`,
    where :math:`K` is the system gain, :math:`\sigma_e^2` is the Poisson-distributed shot
    noise variance, :math:`\sigma_d^2` is the normally distributed electronics noise variance
    and :math:`\sigma_q^2` is the quantization noise variance. If *shot_noise* is False don't
    apply it. If *amplifier_noise* is False don't apply it either. If *psf* is False don't
    apply the point spread function.
    """
    if self._last_input_shape != photons.shape:
        self._last_input_shape = photons.shape
        self._bin_factor = (photons.shape[0] / self.shape[0], photons.shape[1] / self.shape[1])

    if queue is None:
        queue = cfg.OPENCL.queue

    # Shot noise
    # Adjust dark current for later binning and gain
    dark = float(self.dark_current) / self._bin_factor[0] / self._bin_factor[1]
    electrons = dark + gutil.get_host(photons)

    if self._bin_factor != (1, 1):
        if psf:
            sigma = (fwnm_to_sigma(self._bin_factor[0]), fwnm_to_sigma(self._bin_factor[1]))
            small = decimate(electrons, self.shape, sigma=sigma, queue=queue)
        else:
            small = bin_image(electrons, self.shape, queue=queue)
        electrons = gutil.get_host(small)

    if shot_noise:
        electrons = np.random.poisson(electrons)

    if amplifier_noise and self.amplifier_sigma > 0:
        # Add electronics noise
        electrons = np.random.normal(electrons, self.amplifier_sigma)

    counts = self.gain * electrons

    # Cut the values beyond the maximum represented grey value given by bytes per pixel.
    counts[counts > self.max_grey_value] = self.max_grey_value

    # Apply quantization noise
    return counts.astype(self.dtype)
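# A minimal, standalone NumPy sketch of the EMVA 1288 noise chain described in the docstring
# above (hypothetical values, not part of syris): shot noise is Poisson, electronics noise is
# Gaussian, and quantization happens in the final integer conversion, giving a counts variance
# of sigma_y^2 = K^2 * (sigma_e^2 + sigma_d^2) + sigma_q^2.
import numpy as np

K = 2.0                                    # system gain [DN / e-]
photons = np.full((64, 64), 500.0)         # mean photons per pixel (QE = 1 assumed)
dark = 10.0                                # dark-current electrons per exposure
sigma_d = 3.0                              # electronics (read) noise [e-]

electrons = np.random.poisson(photons + dark)       # shot noise, variance sigma_e^2
electrons = np.random.normal(electrons, sigma_d)    # amplifier noise, variance sigma_d^2
counts = (K * electrons).astype(np.uint16)          # gain and quantization (sigma_q^2)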
def main():
    args = parse_args()
    syris.init(device_index=0)
    m = 20

    if args.input == 'grid':
        image = make_grid(args.n, m * q.m).thickness.get()
    elif args.input == 'lena':
        from scipy.misc import lena
        image = lena().astype(cfg.PRECISION.np_float)
        if args.n != image.shape[0]:
            image = gutil.get_host(ip.rescale(image, (args.n, args.n)))

    n = image.shape[0]
    crop_n = n - 2 * m - 2
    y, x = np.mgrid[-n / 2:n / 2, -n / 2:n / 2]
    # Compute a such that the disk diameter is exactly the period when the distance from the
    # middle is n / 2
    a = m / (2 * (crop_n / 2.) ** 2)
    radii = (a * np.sqrt(x ** 2 + y ** 2) ** 2 + 1e-3).astype(cfg.PRECISION.np_float)
    x_param = radii
    y_param = radii

    result = ip.varconvolve_disk(image, (y_param, x_param)).get()
    result = ip.crop(result, (m - 1, m - 1, crop_n, crop_n)).get()
    radii = ip.crop(radii, (m - 1, m - 1, crop_n, crop_n)).get()
    image = ip.crop(image, (m - 1, m - 1, crop_n, crop_n)).get()

    if args.output:
        save_image(args.output, result)

    show(image, title='Original Image')
    show(2 * radii, title='Blurring Disk Diameters')
    show(result, title='Blurred Image')
    plt.show()
def test_fft(self):
    data = gpu_util.get_array(
        np.random.normal(100, 100, size=(4, 4)).astype(cfg.PRECISION.np_float)
    )
    orig = gpu_util.get_host(data)
    data = ip.fft_2(data)
    ip.ifft_2(data)
    np.testing.assert_almost_equal(orig, data.get().real, decimal=4)

    # With a plan
    from pyfft.cl import Plan
    plan = Plan((4, 4), queue=cfg.OPENCL.queue)
    data = ip.fft_2(np.copy(orig), plan=plan)
    ip.ifft_2(data, plan=plan)
    np.testing.assert_almost_equal(orig, data.get().real, decimal=4)

    # Test double precision
    syris.init(double_precision=True, device_index=0)
    data = gpu_util.get_array(
        np.random.normal(100, 100, size=(4, 4)).astype(cfg.PRECISION.np_float)
    )
    gt = np.fft.fft2(data.get())
    data = ip.fft_2(data)
    np.testing.assert_almost_equal(gt, data.get(), decimal=4)

    gt = np.fft.ifft2(data.get())
    data = ip.ifft_2(data)
    np.testing.assert_almost_equal(gt, data.get(), decimal=4)
def make_motion(args):
    syris.init()
    n = 256
    shape = (n, n)
    energies = np.arange(5, 30, 1) * q.keV
    bm, detector = make_devices(n, energies)
    mb = create_sample(n, detector.pixel_size, velocity=20 * q.mm / q.s)
    mb_2 = create_sample(n, detector.pixel_size, velocity=10 * q.mm / q.s)
    mb.material = get_material('pmma_5_30_kev.mat')
    mb_2.material = mb.material

    cube = make_cube() / q.m * 30 * detector.pixel_size + 0.1 * detector.pixel_size
    fov = detector.pixel_size * n
    circle = make_circle().magnitude * fov / 30000 + fov / 2
    tr = Trajectory(circle, velocity=10 * q.um / q.s)
    glass = get_material('glass.mat')
    mesh = Mesh(cube, tr, material=glass)
    ex = Experiment([bm, mb, mb_2, mesh], bm, detector, 0 * q.m, energies)

    for sample in ex.samples:
        if sample != bm:
            sample.trajectory.bind(detector.pixel_size)

    if args.show_flat:
        show(get_flat(shape, energies, detector, bm), title='Counts')
        plt.show()

    if args.conduct:
        if args.output is not None and not os.path.exists(args.output):
            os.makedirs(args.output, mode=0o755)

        t_0 = 0 * q.s
        if args.num_images:
            t_1 = args.num_images / detector.camera.fps
        else:
            t_1 = ex.time

        st = time.time()
        mpl_im = None
        for i, proj in enumerate(ex.make_sequence(t_0, t_1)):
            image = get_host(proj)

            if args.show:
                if mpl_im is None:
                    plt.figure()
                    mpl_im = plt.imshow(image)
                    plt.show(False)
                else:
                    mpl_im.set_data(image)
                    plt.draw()

            if args.output:
                path = os.path.join(args.output, 'projection_{:>05}.png').format(i)
                scipy.misc.imsave(path, image)

        print('Maximum intensity:', image.max())
        print('Duration: {} s'.format(time.time() - st))

    plt.show()
def _test():
    shape = 8, 4
    dtypes = ['i', 'u', 'f']
    lengths = [2, 4, 8]
    types = [
        np.dtype('{}{}'.format(dt, length)) for dt, length in itertools.product(dtypes, lengths)
    ]
    types.append(np.dtype('i1'))
    types.append(np.dtype('u1'))
    types += [np.dtype('c8'), np.dtype('c16')]

    for dtype in types:
        np_data = np.arange(shape[0] * shape[1]).reshape(shape).astype(dtype)

        # host -> Array
        cl_data = gu.get_array(np_data)
        np.testing.assert_equal(np_data, cl_data.get())

        # Array -> Array
        res = gu.get_array(cl_data)
        np.testing.assert_equal(res.get(), cl_data.get())

        # Array -> host
        host_data = gu.get_host(cl_data)
        np.testing.assert_equal(np_data, host_data)

        # host -> host
        host_data = gu.get_host(np_data)
        np.testing.assert_equal(np_data, host_data)

        if dtype.kind != 'c':
            # numpy -> Image and Image -> Array
            image = gu.get_image(np_data)
            back = gu.get_array(image).get()
            np.testing.assert_equal(back, np_data)

            # Image -> host
            host_data = gu.get_host(image)
            np.testing.assert_equal(host_data, np_data)

            # Array -> Image
            image = gu.get_image(cl_data)
            back = gu.get_array(image).get()
            np.testing.assert_equal(back, np_data)

            # Image -> Image
            image_2 = gu.get_image(image)
            back = gu.get_array(image_2).get()
            np.testing.assert_equal(back, np_data)
def _test():
    shape = 8, 4
    dtypes = ["i", "u", "f"]
    lengths = [2, 4, 8]
    types = [
        np.dtype("{}{}".format(dt, length)) for dt, length in itertools.product(dtypes, lengths)
    ]
    types.append(np.dtype("i1"))
    types.append(np.dtype("u1"))
    types += [np.dtype("c8"), np.dtype("c16")]

    for dtype in types:
        np_data = np.arange(shape[0] * shape[1]).reshape(shape).astype(dtype)

        # host -> Array
        cl_data = gu.get_array(np_data)
        np.testing.assert_equal(np_data, cl_data.get())

        # Array -> Array
        res = gu.get_array(cl_data)
        np.testing.assert_equal(res.get(), cl_data.get())

        # Array -> host
        host_data = gu.get_host(cl_data)
        np.testing.assert_equal(np_data, host_data)

        # host -> host
        host_data = gu.get_host(np_data)
        np.testing.assert_equal(np_data, host_data)

        if gu.are_images_supported() and dtype.kind != "c":
            # numpy -> Image and Image -> Array
            image = gu.get_image(np_data)
            back = gu.get_array(image).get()
            np.testing.assert_equal(back, np_data)

            # Image -> host
            host_data = gu.get_host(image)
            np.testing.assert_equal(host_data, np_data)

            # Array -> Image
            image = gu.get_image(cl_data)
            back = gu.get_array(image).get()
            np.testing.assert_equal(back, np_data)

            # Image -> Image
            image_2 = gu.get_image(image)
            back = gu.get_array(image_2).get()
            np.testing.assert_equal(back, np_data)
def merge_tiles(tiles, num_tiles=None, outlier=(0, 0)):
    """Merge *tiles*, which is a list, into one large image. *num_tiles* is a tuple specifying
    the number of tiles as (y, x) or None, meaning there is an equal number of tiles in both
    dimensions. The tiles must be stored in row-major order.
    """
    n, m = get_num_tiles(tiles, num_tiles=num_tiles)
    tile_shape = tiles[0].shape
    crop_shape = (tile_shape[0] - outlier[0], tile_shape[1] - outlier[1])
    result = np.zeros((n * crop_shape[0], m * crop_shape[1]), dtype=tiles[0].dtype)

    for j in range(n):
        for i in range(m):
            tile = g_util.get_host(tiles[j * m + i])[
                outlier[0] // 2:tile_shape[0] - outlier[0] // 2,
                outlier[1] // 2:tile_shape[1] - outlier[1] // 2
            ]
            result[j * crop_shape[0]:(j + 1) * crop_shape[0],
                   i * crop_shape[1]:(i + 1) * crop_shape[1]] = tile

    return result
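# A hypothetical usage sketch for merge_tiles (tiles assumed to come from elsewhere, no
# outlier cropping): four tiles are placed in row-major order into one large image.
import numpy as np

tiles = [np.full((256, 256), i, dtype=np.float32) for i in range(4)]
large = merge_tiles(tiles, num_tiles=(2, 2))
# large.shape == (512, 512); large[:256, :256] comes from tiles[0], large[:256, 256:]
# from tiles[1], and so on.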
def test_fft(self):
    data = gpu_util.get_array(
        np.random.normal(100, 100, size=(4, 4)).astype(cfg.PRECISION.np_float)
    )
    orig = gpu_util.get_host(data)
    data = ip.fft_2(data)
    ip.ifft_2(data)
    np.testing.assert_almost_equal(orig, data.get().real, decimal=4)

    # Test double precision
    default_syris_init(double_precision=True)
    data = gpu_util.get_array(
        np.random.normal(100, 100, size=(4, 4)).astype(cfg.PRECISION.np_float)
    )
    gt = np.fft.fft2(data.get())
    data = ip.fft_2(data)
    np.testing.assert_almost_equal(gt, data.get(), decimal=4)

    gt = np.fft.ifft2(data.get())
    data = ip.ifft_2(data)
    np.testing.assert_almost_equal(gt, data.get(), decimal=4)
st = time.time()
mpl_im = None

# make projections
for i, [data, filename] in enumerate(
        ex.make_tomography(NO_OF_IMAGES, THETA_MAX, PAUSE, NUM_REF_PER_BLOCK,
                           NUM_PROJ_PER_BLOCK, NUM_DARK_IMG, start_frame=START_I, shape=shape)):
    if START_I <= i:
        image = get_host(data)
        msg = '===== COMPUTED {}'
        LOG.debug(msg.format(filename))

        if PLOT_AND_PAUSE:
            show(image)
            plt.show()

        if OUTPUT:
            path_img = os.path.join(OUTPUT, filename)
            tf.imsave(path_img, image.astype(np.uint16))

path_log = os.path.join(OUTPUT, 'scan.log')
ex.write_scan_log(path_log, NO_OF_IMAGES, NUM_REF_PER_BLOCK, NUM_PROJ_PER_BLOCK, THETA_MAX,
                  NUM_DARK_IMG)