    def test_calc_compare_histogram_CORREL(self):
        image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        image_2 = image_1
        image_3 = np.full((200, 200, 3), [100, 0, 0], dtype=np.uint8)

        matching_result = TSEImageUtils.calc_compare_hsv_histogram(
            image_1, image_2, cv2.cv.CV_COMP_CORREL)
        non_matching_result = TSEImageUtils.calc_compare_hsv_histogram(
            image_1, image_3, cv2.cv.CV_COMP_CORREL)

        # Matching result should be greater than a non-matching result for the CORREL matching method.
        assert_true(matching_result > non_matching_result)
    def test_calc_template_match_compare_cv2_score_CCORR(self):
        image_1 = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)
        image_2 = image_1
        image_3 = np.full((200, 200, 3), [200, 0, 0], dtype=np.uint8)

        matching_result = TSEImageUtils.calc_template_match_compare_cv2_score(
            image_1, image_2, cv2.cv.CV_TM_CCORR_NORMED)
        non_matching_result = TSEImageUtils.calc_template_match_compare_cv2_score(
            image_1, image_3, cv2.cv.CV_TM_CCORR_NORMED)

        # For CCORR, we would expect that a perfectly matching image will score HIGHER than a non-matching image
        assert_true(matching_result > non_matching_result)
    def test_calc_template_match_compare_cv2_score_SQDIFF(self):
        image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        image_2 = image_1
        image_3 = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(
            TSEImageUtils.calc_template_match_compare_cv2_score(
                image_1, image_2, cv2.cv.CV_TM_SQDIFF), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(
            TSEImageUtils.calc_template_match_compare_cv2_score(
                image_1, image_3, cv2.cv.CV_TM_SQDIFF) > 0)
    def test_calc_euclidean_distance_cv2_norm(self):
        image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        image_2 = image_1
        image_3 = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(
            TSEImageUtils.calc_euclidean_distance_cv2_norm(image_1, image_2),
            0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(
            TSEImageUtils.calc_euclidean_distance_cv2_norm(image_1, image_3) >
            0)
    def test_calc_compare_histogram_CHISQR(self):
        image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        image_2 = image_1
        image_3 = np.full((200, 200, 3), [100, 0, 0], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(
            TSEImageUtils.calc_compare_hsv_histogram(image_1, image_2,
                                                     cv2.cv.CV_COMP_CHISQR), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(
            TSEImageUtils.calc_compare_hsv_histogram(
                image_1, image_3, cv2.cv.CV_COMP_CHISQR) > 0)
    def test_reshape_match_images_same_shape(self):

        image_target_shape = np.zeros((200, 200, 3), dtype=np.uint8)

        image_current_shape = np.zeros((200, 200, 3), dtype=np.uint8)

        image_reshaped = TSEImageUtils.reshape_match_images(
            image_current_shape, image_target_shape)

        assert_true(np.array_equal(image_target_shape, image_reshaped))
    def test_calc_template_match_compare_cv2_score_scaled_CCORR_NORMED(self):

        # Create a sample test image that is empty.
        original_image = np.zeros((400, 400, 3), dtype=np.uint8)

        # Calculate the scale factor (MAKING SURE TO SUBTRACT '1' from the max height/width to avoid an array index out-of-bounds issue).
        scale_factor_width = TSEGeometry.calc_measure_scale_factor(
            200, (400 - 1))

        # Calculate the scaled indices to identify the pixels in the larger image that we will want to make GREEN to provide evidence for the test succeeding.
        original_image_scaled_indices = np.rint(
            (np.arange(0, 200) * scale_factor_width)).astype(int)

        rows_cols_cartesian_product = np.hsplit(
            TSEDataUtils.calc_cartesian_product(
                [original_image_scaled_indices,
                 original_image_scaled_indices]), 2)

        rows_to_extract = rows_cols_cartesian_product[0].astype(int)
        cols_to_extract = rows_cols_cartesian_product[1].astype(int)

        # We now want to set each of the pixels THAT WE EXPECT TO BE EXTRACTED BY THE TEST to GREEN to show that the test has passed.
        original_image[rows_to_extract, cols_to_extract] = [0, 200, 0]

        # Once we have performed the pixel extraction, we expect that all of the pixels returned will be GREEN (based on the setup above).
        matching_image = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)

        non_matching_image = np.full((200, 200, 3), [200, 0, 0],
                                     dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 1.0 (normalised - higher score = better match).
        assert_equal(
            TSEImageUtils.calc_template_match_compare_cv2_score_scaled(
                matching_image, original_image, cv2.cv.CV_TM_CCORR_NORMED),
            1.0)

        # Check that for non-matching images, we get a score < 1.0 (we should get a smaller score for non-matching images).
        assert_true(
            TSEImageUtils.calc_template_match_compare_cv2_score_scaled(
                non_matching_image, original_image, cv2.cv.CV_TM_CCORR_NORMED)
            < 1.0)
    def test_convert_hsv_and_remove_luminance(self):

        image_three_channel = np.full((50, 50, 3), [100, 50, 200],
                                      dtype=np.uint8)

        image_two_channel = TSEImageUtils.convert_hsv_and_remove_luminance(
            image_three_channel)

        # Test that the colour space has been converted to HSV, and that the 'V' channel has been set to 0 (i.e. removed).
        assert_true(
            (image_two_channel == [170, 191, 0]).all(),
            "Converted colour to HSV and 'V' stripped should equal [170, 191, 0]")
    def test_calc_scaled_image_pixel_dimension_coordinates(self):

        dimension_max_val = 20

        scale_factor = 0.5

        expected_result_float = np.arange(0, dimension_max_val) * scale_factor

        assert_true(
            np.array_equal(
                expected_result_float,
                TSEImageUtils.calc_scaled_image_pixel_dimension_coordinates(
                    dimension_max_val, scale_factor, round=False)))
    def test_extract_image_sub_window(self):

        test_image = np.zeros((200, 200, 3), dtype=np.uint8)

        # Set 50x50px section to Red (top-left corner)
        test_image[0:50, 0:50] = [0, 0, 200]

        expected_result = np.full((50, 50, 3), [0, 0, 200], dtype=np.uint8)

        extracted_slice = TSEImageUtils.extract_image_sub_window(
            test_image, TSEPoint(0, 0), TSEPoint(50, 50))

        assert_true(np.array_equal(expected_result, extracted_slice))
    def test_calc_scaled_image_pixel_dimension_coordinates_rounded(self):

        dimension_max_val = 20

        scale_factor = 0.5

        expected_result_rounded = np.rint(
            np.arange(0, dimension_max_val) * scale_factor)

        assert_true(
            np.array_equal(
                expected_result_rounded,
                TSEImageUtils.calc_scaled_image_pixel_dimension_coordinates(
                    dimension_max_val, scale_factor, round=True)))
    def __init__(self, image_one_file_path, image_two_file_path,
                 calib_data_file_path, plot_axis):

        self._image_one_file_path = image_one_file_path
        self._image_two_file_path = image_two_file_path

        # Find the last instance of '/' in the file path, and grab the image name from the split array.
        self._image_one_file_name = image_one_file_path.rsplit('/', 1)[1]
        self._image_two_file_name = image_two_file_path.rsplit('/', 1)[1]

        self._raw_img1 = cv2.imread(image_one_file_path, cv2.IMREAD_COLOR)
        self._raw_img2 = cv2.imread(image_two_file_path, cv2.IMREAD_COLOR)

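        # Convert both images to the HSV colour space, zeroing the 'V' (luminance) channel.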
        self._hsv_img1 = TSEImageUtils.convert_hsv_and_remove_luminance(
            self._raw_img1)
        self._hsv_img2 = TSEImageUtils.convert_hsv_and_remove_luminance(
            self._raw_img2)

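        # Load the calibration data used to look up the expected patch width for a given image row.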
        self._calibration_lookup = self.load_calibration_data(
            calib_data_file_path)
        self._calib_data_file_path = calib_data_file_path

        self._plot_axis = plot_axis
    def test_calc_ed_template_match_score_scaled_slow_fix_96(self):

        original_image_1 = np.zeros((400, 400, 3), dtype=np.uint8)
        original_image_2 = np.full((400, 400, 3), [200, 0, 0], dtype=np.uint8)
        original_image_3 = np.full((400, 400, 3), [0, 200, 0], dtype=np.uint8)
        original_image_4 = np.full((400, 400, 3), [0, 0, 200], dtype=np.uint8)

        # Note that the template patch is half the size of the original image, so it can be scaled up.
        matching_image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        matching_image_2 = np.full((200, 200, 3), [200, 0, 0], dtype=np.uint8)
        matching_image_3 = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)
        matching_image_4 = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        non_matching_image = np.full((200, 200, 3), [0, 0, 200],
                                     dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                matching_image_1, original_image_1), 0)
        assert_equal(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                matching_image_2, original_image_2), 0)
        assert_equal(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                matching_image_3, original_image_3), 0)
        assert_equal(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                matching_image_4, original_image_4), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                non_matching_image, original_image_1) > 0)
        assert_true(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                non_matching_image, original_image_2) > 0)
        assert_true(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                non_matching_image, original_image_3) > 0)

        # As the "non-matching" image has the same pixel value as "original_image_4", we WOULD EXPECT A MATCH.
        assert_equal(
            TSEImageUtils.calc_ed_template_match_score_scaled_slow(
                non_matching_image, original_image_4), 0)
    def test_scale_hsv_image_no_interpolation_auto(self):

        original_image = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)

        # Set the centre pixel of the original image to a different colour for comparison once scaling is complete.
        original_image[100, 100] = [200, 0, 0]

        larger_target_image = np.zeros((400, 400, 3), dtype=np.uint8)

        scaled_result = TSEImageUtils.scale_image_no_interpolation_auto(
            original_image, larger_target_image)

        # We would expect the centre pixel of the scaled image to be [200, 0, 0], matching the centre pixel of the original non-scaled image.
        assert_true(np.array_equal(scaled_result[200, 200], [200, 0, 0]))

        # We would expect all other pixels to still be GREEN.
        assert_true(np.array_equal(scaled_result[0, 0], [0, 200, 0]))
    def test_extract_rows_cols_pixels_image(self):

        required_rows = [1, 100]
        required_cols = [10, 20]

        image_target_shape = np.zeros((200, 200, 3), dtype=np.uint8)

        image_target_shape[1, 10] = [0, 0, 200]
        image_target_shape[1, 20] = [0, 0, 200]

        image_target_shape[100, 10] = [0, 0, 200]
        image_target_shape[100, 20] = [0, 0, 200]

        returned_image = TSEImageUtils.extract_rows_cols_pixels_image(
            required_rows, required_cols, image_target_shape)

        # The broadcast comparison yields a boolean array; '.all()' checks that every pixel in 'returned_image' equals [0, 0, 200].
        assert_true((returned_image == [0, 0, 200]).all())
    def test_scale_image_interpolation_auto(self):

        original_image = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)

        # Set the centre pixel of the original image to a different colour for comparison once scaling is complete.
        original_image[100, 100] = [200, 0, 0]

        larger_target_image = np.zeros((400, 400, 3), dtype=np.uint8)

        scaled_result = TSEImageUtils.scale_image_interpolation_auto(
            original_image, larger_target_image)

        # We would expect the centre pixel of the scaled image NOT to be GREEN, as the centre pixel of the original non-scaled image was set to a different colour ([200, 0, 0]).
        assert_false(np.array_equal(scaled_result[200, 200], [0, 200, 0]))

        # We would expect all other pixels (apart from immediate neighbours around the centre pixel due to the interpolation) to still be GREEN.
        assert_true(np.array_equal(scaled_result[0, 0], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[399, 399], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[195, 195], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[205, 205], [0, 200, 0]))
    def test_scale_image_roi_relative_centre(self):

        origin_point = (0, 0)
        end_point = (10, 10)

        centre_point = (5, 5)

        scale_factor = 20

        scaled_origin_x = centre_point[0] + (
            (origin_point[0] - centre_point[0]) * scale_factor)
        scaled_origin_y = centre_point[1] + (
            (origin_point[1] - centre_point[1]) * scale_factor)

        scaled_end_x = centre_point[0] + (
            (end_point[0] - centre_point[0]) * scale_factor)
        scaled_end_y = centre_point[1] + (
            (end_point[1] - centre_point[1]) * scale_factor)

        result = TSEImageUtils.scale_image_roi_relative_centre(
            origin_point, end_point, scale_factor)

        assert_equal((scaled_origin_x, scaled_origin_y), result[0].to_tuple())
        assert_equal((scaled_end_x, scaled_end_y), result[1].to_tuple())
    def test_scale_image_interpolation_man(self):

        original_image = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)

        # Set the centre pixel of the original image to a different colour for comparison once scaling is complete.
        original_image[100, 100] = [200, 0, 0]

        larger_target_image = np.zeros((400, 400, 3), dtype=np.uint8)

        # Calculate the scale factor based on the widths of the two images (as the width/height are equal, we can just use the width)
        scale_factor = TSEGeometry.calc_measure_scale_factor(
            original_image.shape[1], (larger_target_image.shape[1]))

        scaled_result = TSEImageUtils.scale_image_interpolation_man(
            original_image, scale_factor)

        # We would expect the centre pixel of the scaled image NOT to be GREEN, as the centre pixel of the original non-scaled image was set to a different colour ([200, 0, 0]).
        assert_false(np.array_equal(scaled_result[200, 200], [0, 200, 0]))

        # We would expect all other pixels (apart from immediate neighbours around the centre pixel due to the interpolation) to still be GREEN.
        assert_true(np.array_equal(scaled_result[0, 0], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[399, 399], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[195, 195], [0, 200, 0]))
        assert_true(np.array_equal(scaled_result[205, 205], [0, 200, 0]))
    def scan_search_window(self,
                           template_patch,
                           template_patch_origin,
                           match_method,
                           force_cont_search=False):

        image_height, image_width = self._hsv_img2.shape[:2]

        template_patch_height, template_patch_width = template_patch.shape[:2]

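        # Extract a vertical search strip from the second image: it starts at the template
        # patch origin, runs to the bottom of the image, and is only as wide as the
        # template patch.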
        localised_window = self._hsv_img2[template_patch_origin.y:image_height,
                                          template_patch_origin.x:(
                                              template_patch_origin.x +
                                              template_patch_width)]

        localised_window_height, localised_window_width = localised_window.shape[:2]

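        # '-1' acts as a sentinel value indicating that no score has been recorded yet.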
        best_score = -1
        best_position = 0

        stop = False

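        # Slide the template patch down the search strip one row at a time, scoring each
        # candidate window against the template.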
        for i in range(0, (localised_window_height - template_patch_height)):

            current_window = localised_window[i:(i + template_patch_height),
                                              0:template_patch_width]
            score = 0

            if match_method.match_type == tse_match_methods.DISTANCE_ED:
                score = TSEImageUtils.calc_euclidean_distance_cv2_norm(
                    template_patch, current_window)

            elif match_method.match_type == tse_match_methods.DISTANCE:
                score = TSEImageUtils.calc_template_match_compare_cv2_score(
                    template_patch, current_window, match_method.match_id)

            elif match_method.match_type == tse_match_methods.HIST:
                score = TSEImageUtils.calc_compare_hsv_histogram(
                    template_patch, current_window, match_method.match_id)

            # If a lower score means a better match, then the method is a 'reverse' method.
            if match_method.reverse_score:

                if best_score == -1 or score < best_score:
                    best_score = score
                    best_position = i

                else:
                    stop = True

            else:

                if best_score == -1 or score > best_score:
                    best_score = score
                    best_position = i

                else:
                    stop = True

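            # Unless a continued search has been forced, stop as soon as the score stops improving.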
            if (force_cont_search is False) and (stop is True):
                break

        # We need to return the 'Y' with the best score (i.e. the displacement)
        return best_position
    def test_constructor(self):
        test_imageutils = TSEImageUtils()
        assert_true(test_imageutils is not None)
    def scan_search_window_scaling(self,
                                   template_patch,
                                   template_patch_origin,
                                   match_method,
                                   force_cont_search=False):

        image_height, image_width = self._hsv_img2.shape[:2]

        image_centre_x = math.floor(image_width / 2)

        template_patch_height, template_patch_width = template_patch.shape[:2]

        new_localised_window_height = image_height - template_patch_height

        best_score = -1
        best_position = 0

        stop = False

        last_width = template_patch_width

        prev_current_window_scaled_coords = None

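        # Walk down the image from the template patch origin, adjusting the width of the
        # search window at each row according to the calibration data.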
        for i in range(template_patch_origin.y, new_localised_window_height):

            score = 0

            if i >= (template_patch_origin.y + 1):
                last_width = self._calibration_lookup[i - 1]

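            # Look up the expected patch width at this row and derive the scale factor
            # relative to the previous width (initially the template patch width).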
            calibrated_patch_width = self._calibration_lookup[i]
            patch_half_width = math.floor(calibrated_patch_width / 2)
            scale_factor = TSEGeometry.calc_measure_scale_factor(
                last_width, calibrated_patch_width)

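            # On the first iteration the scaled window is built around the horizontal centre
            # of the image; afterwards it is derived from the previous window, shifted down
            # by one row.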
            if prev_current_window_scaled_coords is None:

                current_window_scaled_coords = TSEImageUtils.scale_image_roi_relative_centre(
                    ((image_centre_x - patch_half_width), i),
                    ((image_centre_x + patch_half_width),
                     (i + template_patch_height)), scale_factor)

            else:

                # We add +1 to the 'Y' coordinate as we are moving the search window down the ROI by one pixel each time we increase the width.
                current_window_scaled_coords = TSEImageUtils.scale_image_roi_relative_centre(
                    (prev_current_window_scaled_coords[0].x,
                     prev_current_window_scaled_coords[0].y + 1),
                    (prev_current_window_scaled_coords[1].x,
                     prev_current_window_scaled_coords[1].y + 1), scale_factor)

            prev_current_window_scaled_coords = current_window_scaled_coords

            scaled_search_window = TSEImageUtils.extract_image_sub_window(
                self._hsv_img2, current_window_scaled_coords[0],
                current_window_scaled_coords[1])

            if match_method.match_type == tse_match_methods.DISTANCE_ED:
                score = TSEImageUtils.calc_ed_template_match_score_scaled_compiled(
                    template_patch, scaled_search_window)

            elif match_method.match_type == tse_match_methods.DISTANCE:
                score = TSEImageUtils.calc_template_match_compare_cv2_score_scaled(
                    template_patch, scaled_search_window,
                    match_method.match_id)

            elif match_method.match_type == tse_match_methods.HIST:
                scaled_template_patch = TSEImageUtils.scale_image_interpolation_auto(
                    template_patch, scaled_search_window)

                score = TSEImageUtils.calc_compare_hsv_histogram(
                    scaled_template_patch, scaled_search_window,
                    match_method.match_id)

            # If a lower score means a better match, then the method is a 'reverse' method.
            if match_method.reverse_score:

                if best_score == -1 or score < best_score:
                    best_score = score
                    best_position += 1

                else:
                    stop = True

            else:

                if best_score == -1 or score > best_score:
                    best_score = score
                    best_position += 1

                else:
                    stop = True

            if (force_cont_search is False) and (stop is True):
                break

        # We need to return the 'Y' with the best score (i.e. the displacement)
        return best_position