def filter_func(images: Images, diff=None, radius=_default_radius, axis=0, cores=None, progress=None):
    """
    Crop bright outlier pixels by comparing against a median-filtered copy.

    :param images: Input data
    :param diff: Pixel value difference above which to crop bright pixels
    :param radius: Size of the median filter to apply
    :param axis: Orientation to process: 0 selects projections, anything else sinograms
    :param cores: The number of cores that will be used to process the data.
    :param progress: Optional progress reporter forwarded to the parallel executor
    :return: The processed 3D numpy.ndarray
    """
    # fall back to a single core when the data is too small for multiprocessing to pay off
    if not utility.multiprocessing_necessary(images.data.shape, cores):
        cores = 1
    # explicit None checks instead of truthiness; a 0 value is rejected by > 0 either way
    if diff is not None and radius is not None and diff > 0 and radius > 0:
        data = images.projections if axis == 0 else images.sinograms
        func = psm.create_partial(OutliersISISFilter._execute, psm.return_fwd_func, diff=diff, radius=radius)
        psm.execute(data, func, cores, progress=progress,
                    msg=f"Outliers with threshold {diff} and kernel {radius}")
    return images
def find_center(images: Images, progress: Progress) -> Tuple[ScalarCoR, Degrees]:
    """
    Find the center of rotation and the tilt by correlating every row of the
    0-degree projection against the horizontally flipped 180-degree projection.

    :param images: Stack providing the 0 and 180 degree projections
    :param progress: Progress reporter for the per-row correlation
    :return: Center of rotation as a ScalarCoR, and the tilt angle in Degrees
    """
    # assume the ROI is the full image, i.e. the slices are ALL rows of the image
    slices = np.arange(images.height)
    # one shift value per row, written to by the parallel workers via shared memory
    with pu.temp_shared_array((images.height, )) as shift:
        # this is the area that is looked into for the shift after overlapping the images
        search_range = get_search_range(images.width)
        func = shared_mem.create_partial(do_search,
                                         shared_mem.fwd_index_only,
                                         image_width=images.width,
                                         p0=images.projection(0),
                                         p180=np.fliplr(images.proj180deg.data[0]),
                                         search_range=search_range)
        shared_mem.execute(shift, func, progress=progress, msg="Finding correlation on row")
        # fit a straight line through the per-row shifts: slope m and intercept q
        par = np.polyfit(slices, shift, deg=1)
        m = par[0]
        q = par[1]
        LOG.debug(f"m={m}, q={q}")
        # the tilt angle is derived from the slope of the fitted line
        theta = Degrees(np.rad2deg(np.arctan(0.5 * m)))
        # evaluate the fitted shift at the middle row; the round(...)*0.5 keeps
        # the offset on a half-pixel grid
        offset = np.round(m * images.height * 0.5 + q) * 0.5
        LOG.info(f"found offset: {-offset} and tilt {theta}")
        # the CoR is the horizontal middle shifted by the negated offset
        return ScalarCoR(images.h_middle + -offset), theta
def filter_func(images, snr=3, la_size=61, cores=None, chunksize=None, progress=None):
    """
    Run ``remove_large_stripe`` over the image stack in parallel.

    :param images: The stack of images to process
    :param snr: Signal-to-noise ratio forwarded to the stripe removal
    :param la_size: Filter window size forwarded as ``size``
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The processed images
    """
    stripe_removal = psm.create_partial(remove_large_stripe,
                                        psm.return_fwd_func,
                                        snr=snr,
                                        size=la_size)
    psm.execute(images.data, stripe_removal, cores, chunksize, progress)
    return images
def filter_func(images, snr=3, size=61, cores=None, chunksize=None, progress=None):
    """
    Run ``remove_unresponsive_and_fluctuating_stripe`` over the image stack in parallel.

    :param images: The stack of images to process
    :param snr: Signal-to-noise ratio forwarded to the stripe removal
    :param size: Filter window size forwarded to the stripe removal
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The processed images
    """
    stripe_removal = psm.create_partial(remove_unresponsive_and_fluctuating_stripe,
                                        psm.return_fwd_func,
                                        snr=snr,
                                        size=size)
    psm.execute(images.data, stripe_removal, cores, chunksize, progress)
    return images
def filter_func(images, order=1, sigmax=3, sigmay=3, cores=None, chunksize=None, progress=None):
    """
    Run ``remove_stripe_based_sorting_fitting`` over the image stack in parallel.

    :param images: The stack of images to process
    :param order: Polynomial order forwarded to the stripe removal
    :param sigmax: Sigma in x forwarded to the stripe removal
    :param sigmay: Sigma in y forwarded to the stripe removal
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The processed images
    """
    stripe_removal = psm.create_partial(remove_stripe_based_sorting_fitting,
                                        psm.return_fwd_func,
                                        order=order,
                                        sigmax=sigmax,
                                        sigmay=sigmay)
    psm.execute(images.data, stripe_removal, cores, chunksize, progress)
    return images
def filter_func(images, sigma=3, size=21, window_dim=1, filtering_dim=1, cores=None, chunksize=None, progress=None):
    """
    Run the filtering+sorting stripe removal over the image stack in parallel,
    using the 1D variant when ``filtering_dim == 1`` and the 2D variant otherwise.

    :param images: The stack of images to process
    :param sigma: Sigma forwarded to the stripe removal
    :param size: Filter window size forwarded to the stripe removal
    :param window_dim: Forwarded as ``dim`` to the stripe removal
    :param filtering_dim: Selects the 1D or 2D filtering implementation
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The processed images
    """
    # choose the implementation first, then build a single partial
    stripe_removal_func = (remove_stripe_based_filtering_sorting
                           if filtering_dim == 1 else remove_stripe_based_2d_filtering_sorting)
    stripe_removal = psm.create_partial(stripe_removal_func,
                                        psm.return_fwd_func,
                                        sigma=sigma,
                                        size=size,
                                        dim=window_dim)
    psm.execute(images.data, stripe_removal, cores, chunksize, progress)
    return images
def filter_func(images, snr=3, la_size=61, sm_size=21, dim=1, cores=None, chunksize=None, progress=None):
    """
    Run ``remove_all_stripe`` over the image stack in parallel.

    :param images: The stack of images to process
    :param snr: Signal-to-noise ratio forwarded to the stripe removal
    :param la_size: Large-stripe window size forwarded to the stripe removal
    :param sm_size: Small-stripe window size forwarded to the stripe removal
    :param dim: Dimension forwarded to the stripe removal
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The processed images
    """
    stripe_removal = psm.create_partial(remove_all_stripe,
                                        psm.return_fwd_func,
                                        snr=snr,
                                        la_size=la_size,
                                        sm_size=sm_size,
                                        dim=dim)
    psm.execute(images.data, stripe_removal, cores, chunksize, progress)
    return images
def test_memory_fwd_func(self):
    """
    Expected behaviour for the filter is to be done in place
    without using more memory.

    In reality the memory is increased by about 40MB (4 April 2017),
    but this could change in the future.

    The reason why a 10% window is given on the expected size is
    to account for any library imports that may happen. This will
    still capture if the data is doubled, which is the main goal.
    """
    # create data as shared array
    data, _ = th.generate_shared_array_and_copy()
    increment = 5
    expected = data + increment
    # sanity check: the expected output differs from the input at spot samples
    for idx in ((0, 0, 0), (1, 0, 0), (0, 4, 0), (6, 0, 1)):
        assert expected[idx] != data[idx]
    # create partial
    partial_func = psm.create_partial(return_from_func, fwd_func=psm.return_fwd_func, add_arg=increment)
    memory_before = get_memory_usage_linux(kb=True)[0]
    # execute parallel
    data = psm.execute(data, partial_func)
    self.assertLess(get_memory_usage_linux(kb=True)[0], memory_before * 1.1)
    # compare results
    npt.assert_equal(data, expected)
def test_memory_fwd_func_inplace(self):
    """In-place variant: memory must stay within 10% of the pre-execution usage."""
    # create data as shared array
    data, _ = th.generate_shared_array_and_copy()
    increment = 5
    expected = data + increment
    # sanity check: the expected output differs from the input at spot samples
    for idx in ((0, 0, 0), (1, 0, 0), (0, 4, 0), (6, 0, 1)):
        assert expected[idx] != data[idx]
    # create partial
    partial_func = psm.create_partial(add_inplace, fwd_func=psm.inplace, add_arg=increment)
    memory_before = get_memory_usage_linux(kb=True)[0]
    # execute parallel
    data = psm.execute(data, partial_func)
    self.assertLess(get_memory_usage_linux(kb=True)[0], memory_before * 1.1)
    # compare results
    npt.assert_equal(data, expected)
def _execute(data, size, mode, order, cores=None, chunksize=None, progress=None):
    """
    Parallel CPU version of the Gaussian filter.

    :param data: The 3D data to filter
    :param size: Filter size/width, forwarded as ``sigma`` to scipy's gaussian_filter
    :param mode: Border mode forwarded to scipy's gaussian_filter
    :param order: Derivative order forwarded to scipy's gaussian_filter
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The filtered data
    """
    log = getLogger(__name__)
    progress = Progress.ensure_instance(progress, task_name='Gaussian filter')
    # create the partial function to forward the parameters
    f = psm.create_partial(scipy_ndimage.gaussian_filter,
                           fwd_func=psm.return_fwd_func,
                           sigma=size,
                           mode=mode,
                           order=order)
    log.info("Starting PARALLEL gaussian filter, with pixel data type: {0}, "
             "filter size/width: {1}.".format(data.dtype, size))
    progress.update()
    data = psm.execute(data, f, cores, chunksize, progress, msg="Gaussian filter")
    progress.mark_complete()
    log.info("Finished gaussian filter, with pixel data type: {0}, "
             "filter size/width: {1}.".format(data.dtype, size))
    # BUG FIX: the result was previously computed but never returned,
    # unlike the median filter's _execute which returns the data
    return data
def _execute(data, angle: float, cores: int, chunksize: int, progress: Progress):
    """
    Rotate every image of the stack in place, in parallel.

    :param data: The 3D data to rotate
    :param angle: Rotation angle in degrees
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The rotated data
    """
    progress = Progress.ensure_instance(progress, task_name='Rotate Stack')
    with progress:
        rotate_partial = psm.create_partial(_rotate_image_inplace, fwd_func=psm.inplace, angle=angle)
        data = psm.execute(data,
                           rotate_partial,
                           cores=cores,
                           chunksize=chunksize,
                           progress=progress,
                           msg=f"Rotating by {angle} degrees")
        return data
def test_fwd_func(self):
    """Parallel execution with the forwarding function returns the updated data."""
    # create data as shared array
    data, _ = th.generate_shared_array_and_copy()
    increment = 5
    expected = data + increment
    # sanity check: the expected output differs from the input at spot samples
    for idx in ((0, 0, 0), (1, 0, 0), (0, 4, 0), (6, 0, 1)):
        assert expected[idx] != data[idx]
    # create partial
    partial_func = psm.create_partial(return_from_func, fwd_func=psm.return_fwd_func, add_arg=increment)
    # execute parallel
    data = psm.execute(data, partial_func)
    # compare results
    npt.assert_equal(data, expected)
def test_fail_with_normal_array_fwd_func(self):
    """
    With a plain (non-shared) numpy array the parallel execution is expected
    NOT to propagate the changes: the result differs from the expected output
    and the input array is left untouched.
    """
    data = th.gen_img_numpy_rand((11, 10, 10))
    original = np.copy(data)
    increment = 5
    expected = data + increment
    # sanity check: the expected output differs from the input at spot samples
    for idx in ((0, 0, 0), (1, 0, 0), (0, 4, 0), (6, 0, 1)):
        assert expected[idx] != data[idx]
    # create partial
    partial_func = psm.create_partial(return_from_func, fwd_func=psm.return_fwd_func, add_arg=increment)
    # execute parallel
    result = psm.execute(data, partial_func)
    # the result must not match the expected output, and the input is unchanged
    th.assert_not_equals(result, expected)
    npt.assert_equal(data, original)
def _execute(data, size, mode, cores=None, chunksize=None, progress=None):
    """
    Parallel CPU version of the median filter.

    :param data: The 3D data to filter
    :param size: Filter size/width forwarded to scipy's median_filter
    :param mode: Border mode forwarded to scipy's median_filter
    :param cores: Number of cores for the parallel execution
    :param chunksize: Chunk size for the parallel execution
    :param progress: Progress reporter
    :return: The filtered data
    """
    log = getLogger(__name__)
    progress = Progress.ensure_instance(progress, task_name='Median filter')
    # build the partial that forwards the filter parameters to scipy
    median_partial = psm.create_partial(scipy_ndimage.median_filter,
                                        fwd_func=psm.return_fwd_func,
                                        size=size,
                                        mode=mode)
    with progress:
        log.info("PARALLEL median filter, with pixel data type: {0}, filter "
                 "size/width: {1}.".format(data.dtype, size))
        progress.update()
        data = psm.execute(data, median_partial, cores, chunksize, progress, msg="Median filter")
        return data