def test_fwd_func_second_2d(self):
    # create data as shared array
    img = th.generate_shared_array()
    img2nd, orig_2nd = th.generate_shared_array_and_copy()
    img2nd = img2nd[0]

    # make sure it hasn't changed the original array
    expected = img + img2nd + 5
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(add_inplace, fwd_function=ptsm.inplace_second_2d, add_arg=5)

    # execute parallel
    ptsm.execute(img, img2nd, f)

    # compare results
    npt.assert_equal(img, expected)
    npt.assert_equal(img2nd, orig_2nd[0])
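# Minimal sketches of the test helpers these tests pass to ptsm.create_partial.
# The bodies below are illustrative assumptions inferred from the assertions,
# not the actual definitions of add_inplace and return_from_func.
def add_inplace(first_shared, second_shared, add_arg=0):
    # mutate the first array in place; used with the inplace fwd_functions
    first_shared += second_shared + add_arg


def return_from_func(first_shared, second_shared, add_arg=0):
    # return the result and let the fwd_function (return_to_first /
    # return_to_second) decide which shared array receives it
    return first_shared + second_shared + add_arg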
def test_memory_fwd_func_inplace(self):
    # create data as shared array
    img = th.generate_shared_array()
    img2nd, orig_2nd = th.generate_shared_array_and_copy()

    # make sure it hasn't changed the original array
    expected = img + img2nd + 5
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(add_inplace, fwd_function=ptsm.inplace, add_arg=5)

    cached_memory = get_memory_usage_linux(kb=True)[0]

    # execute parallel
    ptsm.execute(img, img2nd, f)

    self.assertLess(get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)

    # compare results
    npt.assert_equal(img, expected)
    npt.assert_equal(img2nd, orig_2nd)
def _execute(data, air_region: SensibleROI, cores=None, chunksize=None, progress=None):
    log = getLogger(__name__)

    with progress:
        progress.update(msg="Normalization by air region")

        if isinstance(air_region, list):
            air_region = SensibleROI.from_list(air_region)

        # initialise one air sum per image
        img_num = data.shape[0]
        with pu.temp_shared_array((img_num, 1, 1), data.dtype) as air_sums:
            # turn into a 1D array, from the 3D that is returned
            air_sums = air_sums.reshape(img_num)

            calc_sums_partial = ptsm.create_partial(_calc_sum,
                                                    fwd_function=ptsm.return_to_second,
                                                    air_left=air_region.left,
                                                    air_top=air_region.top,
                                                    air_right=air_region.right,
                                                    air_bottom=air_region.bottom)

            data, air_sums = ptsm.execute(data, air_sums, calc_sums_partial, cores, chunksize, progress=progress)

            air_sums_partial = ptsm.create_partial(_divide_by_air_sum, fwd_function=ptsm.inplace)

            data, air_sums = ptsm.execute(data, air_sums, air_sums_partial, cores, chunksize, progress=progress)

            avg = np.average(air_sums)
            max_avg = np.max(air_sums) / avg
            min_avg = np.min(air_sums) / avg

            log.info(f"Normalization by air region. "
                     f"Average: {avg}, max ratio: {max_avg}, min ratio: {min_avg}.")
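# Minimal sketches of the per-image helpers assumed by the partials above.
# These bodies are illustrative assumptions, not the repository's actual
# implementations of _calc_sum and _divide_by_air_sum; they assume ptsm calls
# func(data[i], second[i], **kwargs) for each image.
def _calc_sum(data, _, air_left=None, air_top=None, air_right=None, air_bottom=None):
    # sum the air region of one 2D image; ptsm.return_to_second stores
    # the returned value into the air_sums array
    return data[air_top:air_bottom, air_left:air_right].sum()


def _divide_by_air_sum(data, air_sum):
    # normalise one image by its air sum, writing in place
    np.true_divide(data, air_sum, out=data)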
def test_fail_with_normal_array_fwd_func_second_2d(self):
    # shape of 11 forces the execution to be parallel
    img = th.gen_img_numpy_rand((11, 10, 10))
    orig_img = np.copy(img)
    img2nd = th.gen_img_numpy_rand((11, 10, 10))
    orig_img2nd = np.copy(img2nd)
    img2nd = img2nd[0]

    # get the expected as usual
    expected = img + img2nd

    # make sure it hasn't changed the original array
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(add_inplace, fwd_function=ptsm.inplace_second_2d, add_arg=5)

    # execute parallel
    ptsm.execute(img, img2nd, f)

    # compare results
    th.assert_not_equals(img, expected)
    th.assert_not_equals(img2nd, expected)
    npt.assert_equal(img, orig_img)
    npt.assert_equal(img2nd, orig_img2nd[0])
def filter_func(images: Images, rebin_param=0.5, mode=None, cores=None, chunksize=None, progress=None) -> Images:
    """
    :param images: Sample data which is to be processed. Expects radiograms
    :param rebin_param: int, float or tuple
                        int - Percentage of current size.
                        float - Fraction of current size.
                        tuple - Size of the output image (x, y).
    :param mode: Interpolation to use for re-sizing
                 ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic').
    :param cores: The number of cores that will be used to process the data.
    :param chunksize: The number of chunks that each worker will receive.
    :return: The processed 3D numpy.ndarray
    """
    h.check_data_stack(images)

    if isinstance(rebin_param, tuple):
        param_valid = rebin_param[0] > 0 and rebin_param[1] > 0
    else:
        param_valid = rebin_param > 0

    if param_valid:
        sample = images.data
        sample_name: Optional[str]
        if images.memory_filename is not None:
            sample_name = images.memory_filename
            images.free_memory(delete_filename=False)
        else:
            # this case is true when the filter preview is being calculated
            sample_name = None
            # force single core execution as it's faster for a single image
            cores = 1
        empty_resized_data = _create_reshaped_array(sample.shape, sample.dtype, rebin_param, sample_name)

        f = ptsm.create_partial(skimage.transform.resize,
                                ptsm.return_to_second_but_dont_use_it,
                                mode=mode,
                                output_shape=empty_resized_data.shape[1:])
        ptsm.execute(sample, empty_resized_data, f, cores, chunksize, progress=progress, msg="Applying Rebin")
        images.data = empty_resized_data

    return images
def filter_func(images: Images, rebin_param=0.5, mode=None, cores=None, chunksize=None, progress=None) -> Images:
    """
    :param images: Sample data which is to be processed. Expects radiograms
    :param rebin_param: int, float or tuple
                        int - Percentage of current size.
                        float - Fraction of current size.
                        tuple - Size of the output image (x, y).
    :param mode: Interpolation to use for re-sizing
                 ('nearest', 'lanczos', 'bilinear', 'bicubic' or 'cubic').
    :param cores: The number of cores that will be used to process the data.
    :param chunksize: The number of chunks that each worker will receive.
    :return: The processed 3D numpy.ndarray
    """
    h.check_data_stack(images)

    if isinstance(rebin_param, tuple):
        param_valid = rebin_param[0] > 0 and rebin_param[1] > 0
    else:
        param_valid = rebin_param > 0

    if param_valid:
        sample = images.data
        # allocate the output first BEFORE freeing the original data,
        # otherwise it's possible to free and then fail the allocation for the
        # output, at which point you're left with no data
        empty_resized_data = _create_reshaped_array(images, rebin_param)

        f = ptsm.create_partial(skimage.transform.resize,
                                ptsm.return_to_second_but_dont_use_it,
                                mode=mode,
                                output_shape=empty_resized_data.shape[1:])
        ptsm.execute(sample, empty_resized_data, f, cores, chunksize, progress=progress, msg="Applying Rebin")
        images.data = empty_resized_data

    return images
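# A hypothetical usage sketch for the rebin filter above; `images` is assumed
# to be an already loaded Images stack, and the parameter values are
# illustrative only.
images = filter_func(images, rebin_param=0.5, mode='bilinear', cores=4)
# with rebin_param=0.5 each output image has half the original width and height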
def test_memory_return_to_second(self):
    # create data as shared array
    img, orig_img = th.generate_shared_array_and_copy()
    img2nd = th.generate_shared_array()

    # make sure it hasn't changed the original array
    expected = img + img2nd + 5
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(return_from_func, fwd_function=ptsm.return_to_second, add_arg=5)

    # execute parallel
    cached_memory = get_memory_usage_linux(kb=True)[0]
    res1, res2 = ptsm.execute(img, img2nd, f)
    self.assertLess(get_memory_usage_linux(kb=True)[0], cached_memory * 1.1)

    # compare results
    npt.assert_equal(res2, expected)
    npt.assert_equal(res1, orig_img)
def test_fail_with_normal_array_return_to_second(self):
    """
    This test does not use shared arrays and will not change the data.
    This behaviour is intended and is verified by the assertions below.
    """
    # create data as normal nd array
    img = th.gen_img_numpy_rand()
    img2nd = th.gen_img_numpy_rand()

    # get the expected as usual
    expected = img + img2nd

    # make sure it hasn't changed the original array
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(return_from_func, fwd_function=ptsm.return_to_second, add_arg=5)

    # execute parallel
    res1, res2 = ptsm.execute(img, img2nd, f)

    # compare results
    npt.assert_equal(res1, img)
    npt.assert_equal(res2, img2nd)
    th.assert_not_equals(res2, expected)
def test_fail_with_normal_array_return_to_first(self):
    # create data as normal nd array
    img = th.gen_img_numpy_rand()
    img2nd = th.gen_img_numpy_rand()

    # get the expected as usual
    expected = img + img2nd

    # make sure it hasn't changed the original array
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(return_from_func, fwd_function=ptsm.return_to_first, add_arg=5)

    # execute parallel
    res1, res2 = ptsm.execute(img, img2nd, f)

    # compare results
    npt.assert_equal(res1, img)
    npt.assert_equal(res2, img2nd)
    th.assert_not_equals(res1, expected)
def create_factors(data: np.ndarray, roi=None, cores=None, chunksize=None, progress=None):
    """
    Calculate the scale factors as the mean of the ROI.

    :param data: The data stack from which the scale factors will be calculated
    :param roi: Region of interest for which the scale factors will be calculated
    :param cores: Number of cores that will perform the calculation
    :param chunksize: How many chunks of work each core will receive
    :return: The scale factor for each image.
    """
    progress = Progress.ensure_instance(progress, num_steps=data.shape[0])
    with progress:
        img_num = data.shape[0]
        # the context manager makes sure the temporary scale factor array is
        # cleaned up, even if for some reason it still exists afterwards
        with pu.temp_shared_array((img_num, 1, 1)) as scale_factors:
            # turn into a 1D array, from the 3D that is returned
            scale_factors = scale_factors.reshape(img_num)

            # calculate the scale factor from the original image
            calc_sums_partial = ptsm.create_partial(_calc_avg,
                                                    fwd_function=ptsm.return_to_second,
                                                    roi_left=roi[0] if roi else 0,
                                                    roi_top=roi[1] if roi else 0,
                                                    roi_right=roi[2] if roi else data[0].shape[1] - 1,
                                                    roi_bottom=roi[3] if roi else data[0].shape[0] - 1)

            data, scale_factors = ptsm.execute(data, scale_factors, calc_sums_partial, cores, chunksize)

            return scale_factors
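# A minimal sketch of the per-image helper assumed above; this body is an
# illustrative assumption, not the repository's actual _calc_avg.
def _calc_avg(data, _, roi_left=0, roi_top=0, roi_right=0, roi_bottom=0):
    # mean of the ROI of one 2D image; ptsm.return_to_second stores
    # the returned value into the scale_factors array
    return data[roi_top:roi_bottom, roi_left:roi_right].mean()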
def _execute_par(data: numpy.ndarray, resized_data, mode, cores=None, chunksize=None, progress=None):
    f = ptsm.create_partial(skimage.transform.resize,
                            ptsm.return_to_second_but_dont_use_it,
                            mode=mode,
                            output_shape=resized_data.shape[1:])
    ptsm.execute(data, resized_data, f, cores, chunksize, progress=progress, msg="Applying Rebin")
    return resized_data
def _execute(data, flat=None, dark=None, cores=None, chunksize=None, progress=None):
    """
    A benchmark justifying the current implementation, performed on
    500x2048x2048 images.

    #1 Separate runs
    Subtract (sequential with np.subtract(data, dark, out=data)) - 13s
    Divide (par) - 1.15s

    #2 Separate parallel runs
    Subtract (par) - 5.5s
    Divide (par) - 1.15s

    #3 Added subtract into _divide so that it is:
    np.true_divide(np.subtract(data, dark, out=data), norm_divide, out=data)
    Subtract then divide (par) - 55s
    """
    with progress:
        progress.update(msg="Applying background correction")

        with pu.temp_shared_array((1, data.shape[1], data.shape[2]), data.dtype) as norm_divide:
            # remove a dimension, I found this to be the easiest way to do it
            norm_divide = norm_divide.reshape(data.shape[1], data.shape[2])

            # subtract dark from flat and copy into shared array with [:]
            norm_divide[:] = np.subtract(flat, dark)

            # prevent divide-by-zero issues, and negative pixels make no sense
            norm_divide[norm_divide == 0] = MINIMUM_PIXEL_VALUE

            # subtract the dark from all images
            f = ptsm.create_partial(_subtract, fwd_function=ptsm.inplace_second_2d)
            data, dark = ptsm.execute(data, dark, f, cores, chunksize, progress=progress)

            # divide the data by (flat - dark)
            f = ptsm.create_partial(_divide, fwd_function=ptsm.inplace_second_2d)
            data, norm_divide = ptsm.execute(data, norm_divide, f, cores, chunksize, progress=progress)

    return data
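# Minimal sketches of the per-image helpers assumed above; these bodies are
# illustrative assumptions, not the repository's actual _subtract and _divide.
def _subtract(data, dark_2d):
    # subtract the shared 2D dark frame from one image, in place
    np.subtract(data, dark_2d, out=data)


def _divide(data, norm_divide_2d):
    # divide one image by the shared (flat - dark) frame, in place
    np.true_divide(data, norm_divide_2d, out=data)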
def apply_factor(data: np.ndarray, scale_factors, cores=None, chunksize=None, progress=None):
    """
    This will apply the scale factors to the data stack.

    :param data: the data stack to which the scale factors will be applied.
    :param scale_factors: The scale factors to be applied
    """
    # scale up the data
    progress = Progress.ensure_instance(progress, num_steps=data.shape[0])
    with progress:
        scale_up_partial = ptsm.create_partial(_scale_inplace, fwd_function=ptsm.inplace_second_2d)

        # scale up all images by the mean of the scale factors; this keeps the
        # contrast the same as in the region of interest
        data, scale_factors = ptsm.execute(data, [scale_factors.mean()], scale_up_partial, cores, chunksize,
                                           progress=progress)

    return data
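# A minimal sketch of the helper assumed above; an illustrative assumption,
# not the repository's actual _scale_inplace.
def _scale_inplace(data, scale_factor):
    # multiply one image by the shared mean scale factor, in place
    np.multiply(data, scale_factor, out=data)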
def filter_func(images: Images, cores=None, chunksize=None, progress=None) -> Images:
    counts = images.counts()

    if counts is None:
        raise RuntimeError("No loaded log values for this stack.")

    # normalise the counts to the first image's count value
    counts_val = counts.value / counts.value[0]

    div_partial = ptsm.create_partial(_divide_by_counts, fwd_function=ptsm.inplace)
    # assign the processed stack back to images.data so the annotated
    # return type (Images) is honoured, instead of returning the raw array
    images.data, _ = ptsm.execute(images.data, counts_val, div_partial, cores, chunksize, progress=progress)
    return images
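# A minimal sketch of the helper assumed above; an illustrative assumption,
# not the repository's actual _divide_by_counts.
def _divide_by_counts(data, count):
    # divide one image by its normalised count value, in place
    np.true_divide(data, count, out=data)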
def test_return_to_first(self):
    # create data as shared array
    img = th.generate_shared_array()
    img2nd, orig_2nd = th.generate_shared_array_and_copy()

    # make sure it hasn't changed the original array
    expected = img + img2nd + 5
    assert expected[0, 0, 0] != img[0, 0, 0]
    assert expected[1, 0, 0] != img[1, 0, 0]
    assert expected[0, 4, 0] != img[0, 4, 0]
    assert expected[6, 0, 1] != img[6, 0, 1]

    # create partial
    f = ptsm.create_partial(return_from_func, fwd_function=ptsm.return_to_first, add_arg=5)

    # execute parallel
    res1, res2 = ptsm.execute(img, img2nd, f)

    # compare results
    npt.assert_equal(res1, expected)
    npt.assert_equal(res2, orig_2nd)