def test_contour_area_filter_correctly_filters_beam_images():
    """The default contour-area filter accepts every beam image except
    beam_image_04, which is expected to contain no usable beam.
    """
    # The enumerate index in the original was unused; iterate directly.
    for image in beam_images:
        image_passes = contour_area_filter(image)
        if image is beam_image_04:
            assert not image_passes
        else:
            assert image_passes
def test_sim_det_device_interfaces_with_psbeam_properly(sim_det_01):
    """Run detect() on ten frames from the simulated detector.

    The simulated detector cycles through four images; frames where the
    counter modulo 4 is 3 are expected to contain no beam, so detect()
    should raise NoBeamDetected on those shots.
    """
    det = sim_det_01
    im_filter = lambda image: contour_area_filter(image, uint_mode="clip")
    for shot in range(10):
        try:
            frame = det.image.image
            # idx is bound before detect() so it is available in the handler
            idx = det.image.array_counter.value % 4
            cent, bbox = detect(frame, uint_mode="clip", filters=im_filter)
            assert idx != 3 or shot == 0
        except NoBeamDetected:
            assert idx == 0
def test_sim_det_interfaces_with_bluesky_correctly(sim_det_01, sim_det_02, RE):
    """Read ten frames through a bluesky RunEngine and re-run detect() on the
    collected event data, checking that NoBeamDetected fires on the expected
    (every fourth) frames.
    """
    global_data = None
    det_01 = sim_det_01
    det_02 = sim_det_02
    im_filter = lambda image: contour_area_filter(image, uint_mode="clip")
    # Fake event storage: collectors append each event's field value here
    array_data = []
    array_count = []
    col_images = collector(det_01.image.array_data.name, array_data)
    col_count = collector(det_01.image.array_counter.name, array_count)
    # A test plan that just reads the data 10 times
    def test_plan(det):
        read_data = yield from measure([det], num=10)
    # Include the counter in the read
    det_01.image.read_attrs = ["array_data", "array_counter"]
    # Run the plan, subscribing both collectors to event documents
    RE(run_wrapper(test_plan(det_01)), subs={'event': [col_count, col_images]})
    # Check each collected image against its counter value
    im_filter = lambda image: contour_area_filter(image, uint_mode="clip")
    for count, array in zip(array_count, array_data):
        try:
            array_size = [int(val) for val in det_01.image.array_size.get()]
            if array_size == [0, 0, 0]:
                raise RuntimeError('Invalid image')
            # A trailing 0 means the image is 2D; drop the empty dimension
            if array_size[-1] == 0:
                array_size = array_size[:-1]
            image = np.array(array).reshape(array_size)
            # Counter is 1-based; idx is the 0-based frame index
            idx = count - 1
            cent, bbox = detect(image, uint_mode="clip", filters=im_filter)
            assert (idx % 4 != 3 or idx == 0)
        except NoBeamDetected:
            assert (idx % 4 == 3)
def characterize(detector, array_signal_str, size_signal_str, num=10, delay=None, filters=None, drop_missing=True, filter_kernel=(9,9), resize=1.0, uint_mode="scale", min_area=100, filter_factor=(9,9), min_area_factor=3, kernel=(9,9), thresh_factor=3, thresh_mode="otsu", md=None, **kwargs): """ Characterizes the beam profile by computing various metrics and statistics of the beam using the inputted detector. The function performs 'num' reads on the array_data field of the detector, optionally filtering shots until 'num' shots have been collected, and then runs the processing pipeline on each of the resulting arrays. The processing pipeline computes the contours of the image, from which the area, length, width, centroid and circularity of the contour is computed. Additionally, the sum and mean intensity values are computed of both the preprocessed image and the raw image. Once the pipeline has been finished processing for all 'num' images, the mean and standard deviation of each statistic is computed, giving a total 20 entries to the stats dictionary. Computed Statistics ------------------- sum_mn_raw Mean of the sum of the raw image intensities. sum_std_raw Standard deviation of sum of the raw image pixel intensities. sum_mn_prep Mean of the sum of the preprocessed image pixel intensities. sum_std_prep Standard deviation of the sum of the preprocessed image pixel intensities. mean_mn_raw Mean of the mean of the raw image pixel intensities. mean_std_raw Standard deviation of the mean of the raw image pixel intensities. mean_mn_prep Mean of the mean of the preprocessed image pixel intensities. mean_std_prep Standard deviation of the mean of the preprocessed image pixel intensities. area_mn Mean of the area of the contour of the beam. area_std Standard deviation of area of the contour of the beam. centroid_x_mn Mean of the contour centroid x. centroid_x_std Standard deviation of the contour centroid x. centroid_y_mn Mean of the contour centroid y. 
centroid_y_std Standard deviation of the contour centroid y. length_mn Mean of the contour length. length_std Standard deviation of the contour length. width_mn Mean of the contour width. width_std Standard deviation of the contour width. match_mn Mean score of contour similarity to a binary image of a circle. match_std Standard deviation of score of contour similarity to a binary image of a circle. Parameters ---------- detector : detector obj Detector object that contains the components for the array data and image shape data. array_signal_str : str The string name of the array_data signal. size_signal_str : str The string name of the signal that will provide the shape of the image. num : int Number of measurements that need to pass the filters. delay : float Minimum time between consecutive reads of the detectors. filters : dict, optional Key, callable pairs of event keys and single input functions that evaluate to True or False. drop_missing : bool, optional Choice to include events where event keys are missing. filter_kernel : tuple, optional Kernel to use when gaussian blurring in the contour filter. resize : float, optional How much to resize the image by before doing any calculations. uint_mode : str, optional Conversion mode to use when converting to uint8. min_area : float, optional Minimum area of the otsu thresholded beam. filter_factor : float Factor to pass to the filter mean threshold. min_area_factor : float The amount to scale down the area for comparison with the mean threshold contour area. kernel : tuple, optional Size of kernel to use when running the gaussian filter. thresh_mode : str, optional Thresholding mode to use. For extended documentation see preprocessing.threshold_image. Valid modes are: ['mean', 'top', 'bottom', 'adaptive', 'otsu'] thresh_factor : int or float, optional Factor to pass to the mean threshold. md : str, optional How much meta data to include in the output dict. 
Valid options are: [None, 'basic', 'all'] Note: The 'all' option is for debugging purposes and should not be used in production. Returns ------- results_dict : dict Dictionary containing the statistics obtained from the array data, and optionally some meta-data. """ # Get the image and size signals array_signal = getattr(detector, array_signal_str) size_signal = getattr(detector, size_signal_str) # Apply the default filter if filters is None: filters = dict() array_signal_str_full = detector.name + "_" + array_signal_str.replace( ".", "_") filters[array_signal_str_full] = lambda image : contour_area_filter( to_image(image, detector, size_signal), kernel=filter_kernel, min_area=min_area, min_area_factor=min_area_factor, factor=filter_factor, uint_mode=uint_mode) # Get images for all the shots data = yield from measure([array_signal], num=num, delay=delay, filters=filters, drop_missing=drop_missing) # Process the data results = process_det_data(data, array_signal, size_signal, kernel=kernel, uint_mode=uint_mode, thresh_mode=thresh_mode, thresh_factor=thresh_factor, resize=resize, md=md, **kwargs) return results
def test_contour_area_filter_works_on_hx2_images():
    """Each hx2 image key ends in three digits encoding whether the filter is
    expected to pass (nonzero) or reject (zero) that image.
    """
    for key, img in images_hx2.items():
        image_passes = contour_area_filter(img, kernel=(19, 19), factor=2)
        # The original assertion had a redundant trailing `and True`.
        assert image_passes == bool(int(key[-3:]))
def test_contour_area_filter_returns_false_for_small_areas():
    """A single 1-pixel-wide column (area well under min_area=20) must be
    rejected by the contour-area filter.
    """
    array_test = np.zeros((10, 10), dtype=np.uint8)
    array_test[:, 0] = 1
    assert not contour_area_filter(array_test, min_area=20)
def test_contour_area_filter_returns_false_for_empty_images():
    """An all-zero image has no contours, so the filter must reject it."""
    assert not contour_area_filter(np.zeros((10, 10), dtype=np.uint8))