Example No. 1
    def test_feed_forward_superset_region(self):
        ff_track = mpf.VideoTrack(
            0, 2, -1, {
                0: mpf.ImageLocation(60, 300, 100, 40, -1,
                                     dict(ROTATION='260')),
                1: mpf.ImageLocation(160, 350, 130, 20, -1,
                                     dict(ROTATION='60')),
                2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))
            }, {})

        for rotation in range(0, 361, 20):
            job = mpf.VideoJob(
                'Test',
                test_util.get_data_file_path(
                    'rotation/feed-forward-rotation-test.png'),
                ff_track.start_frame, ff_track.stop_frame,
                dict(FEED_FORWARD_TYPE='SUPERSET_REGION',
                     ROTATION=str(rotation)), {}, ff_track)
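            # With FEED_FORWARD_TYPE=SUPERSET_REGION, VideoCapture crops every frame to a single
            # region that covers all of the detections in the feed-forward track.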
            expected_min_num_blue = 0
            expected_max_num_blue = 0
            for il in ff_track.frame_locations.values():
                area = il.width * il.height
                perimeter = 2 * il.width + 2 * il.height
                expected_min_num_blue += area - perimeter
                expected_max_num_blue += area + perimeter

            frame = next(mpf_util.VideoCapture(job))
            actual_num_blue = count_matching_pixels(frame, (255, 0, 0))
            # Color of pixels along edges gets blended with nearby pixels during interpolation.
            self.assertLessEqual(actual_num_blue, expected_max_num_blue)
            self.assertGreaterEqual(actual_num_blue, expected_min_num_blue)
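
The count_matching_pixels helper is referenced but not defined in this listing; a minimal sketch of a compatible implementation, assuming frames are BGR numpy arrays:

import numpy as np

def count_matching_pixels(frame, bgr_color):
    # Count the pixels whose BGR value exactly matches the given color.
    return int(np.count_nonzero(np.all(frame == bgr_color, axis=2)))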
Example No. 2
    def get_detections_from_video(self, video_job):
        logger.info('[%s] Received video job: %s', video_job.job_name,
                    video_job)
        if video_job.feed_forward_track is not None:
            return [video_job.feed_forward_track]

        echo_job, echo_media = self.get_echo_msgs(video_job)

        track1 = mpf.VideoTrack(0, 1)
        track1.frame_locations[0] = mpf.ImageLocation(1, 2, 3, 4, -1, {
            'METADATA': 'test',
            'ECHO_JOB': echo_job,
            'ECHO_MEDIA': echo_media
        })

        track1.frame_locations[1] = mpf.ImageLocation(5, 6, 7, 8, -1)
        track1.frame_locations[1].detection_properties['ECHO_JOB'] = echo_job
        track1.frame_locations[1].detection_properties[
            'ECHO_MEDIA'] = echo_media
        track1.detection_properties.update(video_job.job_properties)
        track1.detection_properties.update(video_job.media_properties)

        track2 = mpf.VideoTrack(
            3, 4, -1, {
                3:
                mpf.ImageLocation(9, 10, 11, 12, -1,
                                  [('ECHO_JOB', echo_job),
                                   ('ECHO_MEDIA', echo_media)])
            }, mpf.Properties(ECHO_JOB=echo_job, ECHO_MEDIA=echo_media))
        # Make sure regular collections are accepted
        return [track1, track2]
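
The get_echo_msgs helper used above is not defined in this listing; a hypothetical sketch with a compatible signature (what it actually echoes is an assumption):

    def get_echo_msgs(self, job):
        # Hypothetical: echo selected values from the job and media properties.
        echo_job = job.job_properties.get('ECHO_JOB', 'no ECHO_JOB property')
        echo_media = job.media_properties.get('ECHO_MEDIA', 'no ECHO_MEDIA property')
        return echo_job, echo_media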
Example No. 3
    def get_detections_from_video_capture(self, video_job, video_capture):
        test = self._test

        for frame_index, frame in enumerate(video_capture):
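            # The test frames are assumed to be 80 pixels wide and 100 tall, with black
            # top-left / bottom-right corners and white top-right / bottom-left corners.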
            top_left_corner = frame[:20, :30]
            test.assertTrue(test_util.is_all_black(top_left_corner))
            bottom_right_corner = frame[80:, 50:]
            test.assertTrue(test_util.is_all_black(bottom_right_corner))

            top_right_corner = frame[:20, 50:]
            test.assertTrue(test_util.is_all_white(top_right_corner))
            bottom_left_corner = frame[80:, :30]
            test.assertTrue(test_util.is_all_white(bottom_left_corner))

            for corner in (top_left_corner, bottom_right_corner, top_right_corner, bottom_left_corner):
                test.assertEqual(mpf_util.Size(30, 20), mpf_util.Size.from_frame(corner))

            yield mpf.VideoTrack(frame_index, frame_index,
                                 frame_locations={frame_index: mpf.ImageLocation(0, 0, 30, 20)})
            yield mpf.VideoTrack(frame_index, frame_index,
                                 frame_locations={frame_index: mpf.ImageLocation(50, 80, 30, 20)})
            yield mpf.VideoTrack(frame_index, frame_index,
                                 frame_locations={frame_index: mpf.ImageLocation(50, 0, 30, 20)})
            yield mpf.VideoTrack(frame_index, frame_index,
                                 frame_locations={frame_index: mpf.ImageLocation(0, 80, 30, 20)})
Example No. 4
    def test_feed_forward_exact_region(self):
        ff_track = mpf.VideoTrack(
            0, 2, -1, {
                0: mpf.ImageLocation(60, 300, 100, 40, -1,
                                     dict(ROTATION='260')),
                1: mpf.ImageLocation(160, 350, 130, 20, -1,
                                     dict(ROTATION='60')),
                2: mpf.ImageLocation(260, 340, 60, 60, -1, dict(ROTATION='20'))
            }, {})
        job = mpf.VideoJob(
            'Test',
            test_util.get_data_file_path(
                'rotation/feed-forward-rotation-test.png'),
            ff_track.start_frame, ff_track.stop_frame,
            dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)

        test_img = cv2.imread(job.data_uri)

        transformer = frame_transformer_factory.get_transformer(
            job, mpf_util.Size.from_frame(test_img))

        for frame_number, ff_detection in ff_track.frame_locations.items():
            frame = transformer.transform_frame(test_img, frame_number)
            frame_size = mpf_util.Size.from_frame(frame)
            self.assertEqual(frame_size,
                             (ff_detection.width, ff_detection.height))
            self.assert_image_color(frame, (255, 0, 0))

            size_as_tuple = typing.cast(typing.Tuple[int, int], frame_size)
            new_detection = mpf.ImageLocation(0, 0, *size_as_tuple)
            transformer.reverse_transform(new_detection, frame_number)
            self.assert_detections_same_location(new_detection, ff_detection)
Example No. 5
    def get_detections_from_image(image_job):
        logger.info('[%s] Received image job: %s', image_job.job_name,
                    image_job)
        if image_job.feed_forward_location is not None:
            yield image_job.feed_forward_location
            return
        il = mpf.ImageLocation(0, 0, 100, 100)
        echo_job, echo_media = TestComponent.get_echo_msgs(image_job)

        il.detection_properties['METADATA'] = 'extra info for first result'
        il.detection_properties['ECHO_JOB'] = echo_job
        il.detection_properties['ECHO_MEDIA'] = echo_media

        # Make sure generators are acceptable return values
        yield il

        error_code = image_job.job_properties.get('raise_exception', None)
        if error_code is not None:
            raise mpf.DetectionException('Exception Message',
                                         mpf.DetectionError(int(error_code)))

        yield mpf.ImageLocation(
            10, 20, 12, 34, -1, {
                'METADATA': 'extra info for second result',
                'ECHO_JOB': echo_job,
                'ECHO_MEDIA': echo_media
            })

        logger.info('[%s] Found %s detections', image_job.job_name, 2)
Example No. 6
def create_test_track():
    return mpf.VideoTrack(5,
                          10,
                          frame_locations={
                              5: mpf.ImageLocation(20, 30, 15, 5),
                              7: mpf.ImageLocation(0, 1, 2, 3),
                              10: mpf.ImageLocation(4, 5, 6, 7)
                          })
Example No. 7
    def test_can_fix_frame_pos_in_reverse_transform(self):
        cap = create_video_capture(5, 19, 2)
        il = mpf.ImageLocation(0, 1, 2, 3)
        track = mpf.VideoTrack(1, 6, frame_locations={1: il, 2: il, 6: il})
        cap.reverse_transform(track)
        self.assertEqual(7, track.start_frame)
        self.assertEqual(17, track.stop_frame)
        self.assert_dict_contains_keys((7, 9, 17), track.frame_locations)
Example No. 8
    def test_can_handle_rotated_detection_touching_corner(self):
        self.verify_correctly_rotated(
            '30deg-bounding-box-top-left-corner.png',
            mpf.ImageLocation(0, 51, 100, 40, -1, dict(ROTATION='30.5')))

        self.verify_correctly_rotated(
            '60deg-bounding-box-top-left-corner.png',
            mpf.ImageLocation(0, 86, 100, 40, -1, dict(ROTATION='60')))

        self.verify_correctly_rotated(
            '200deg-bounding-box-top-left-corner.png',
            mpf.ImageLocation(108, 38, 100, 40, -1, dict(ROTATION='200')))

        self.verify_correctly_rotated(
            '20deg-bounding-box-bottom-left-corner.png',
            mpf.ImageLocation(0, 367, 30, 120, -1, dict(ROTATION='20')))

        self.verify_correctly_rotated(
            '160deg-bounding-box-bottom-right-corner.png',
            mpf.ImageLocation(599, 480, 30, 120, -1, dict(ROTATION='160')))

        self.verify_correctly_rotated(
            '260deg-bounding-box-top-right-corner.png',
            mpf.ImageLocation(640, 21, 30, 120, -1, dict(ROTATION='260')))

        self.verify_correctly_rotated(
            '270deg-bounding-box-top-right-corner.png',
            mpf.ImageLocation(640, 0, 30, 120, -1, dict(ROTATION='270')))
Example No. 9
    def test_rotation_threshold_with_feed_forward_and_negative_coordinate(
            self):
        ff_loc = mpf.ImageLocation(-10, 20, 50, 30, 1, dict(ROTATION='0.5'))
        job = mpf.ImageJob(
            'Test job', test_util.get_data_file_path('test_img.png'),
            dict(FEED_FORWARD_TYPE='REGION', ROTATION_THRESHOLD='1'), dict(),
            ff_loc)
        img = mpf_util.ImageReader(job).get_image()
        self.assertEqual(img.shape, (30, 40, 3))
Example No. 10
def to_feed_forward_filter(interval_filter):
    frame_count = interval_filter.get_segment_frame_count()
    frame_location_map = dict()
    for i in range(frame_count):
        original_pos = interval_filter.segment_to_original_frame_position(i)
        frame_location_map[original_pos] = mpf.ImageLocation(0, 0, 0, 0)

    ff_track = mpf.VideoTrack(0, frame_count, frame_locations=frame_location_map)
    return FeedForwardFrameFilter(ff_track)
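
to_feed_forward_filter only relies on the interval filter's get_segment_frame_count and segment_to_original_frame_position methods. A minimal stand-in illustrating that assumed interface (hypothetical, not the MPF frame-filter implementation):

class SimpleIntervalFilter:
    # Hypothetical filter over frames start_frame, start_frame + interval, ..., up to stop_frame.
    def __init__(self, start_frame, stop_frame, interval):
        self.start_frame = start_frame
        self.stop_frame = stop_frame
        self.interval = interval

    def get_segment_frame_count(self):
        return (self.stop_frame - self.start_frame) // self.interval + 1

    def segment_to_original_frame_position(self, segment_position):
        return self.start_frame + segment_position * self.interval

ff_filter = to_feed_forward_filter(SimpleIntervalFilter(5, 19, 2))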
Example No. 11
    def test_can_handle_feed_forward_track(self):
        ff_track = mpf.VideoTrack(0,
                                  29,
                                  frame_locations={
                                      1: mpf.ImageLocation(5, 5, 5, 10),
                                      3: mpf.ImageLocation(4, 4, 5, 6),
                                      7: mpf.ImageLocation(5, 5, 8, 9),
                                      11: mpf.ImageLocation(4, 5, 5, 6),
                                      12: mpf.ImageLocation(4, 4, 1, 2),
                                      20: mpf.ImageLocation(5, 5, 5, 5),
                                      25: mpf.ImageLocation(4, 4, 5, 5)
                                  })

        job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1,
                           dict(FEED_FORWARD_TYPE='SUPERSET_REGION'), {},
                           ff_track)

        cap = mpf_util.VideoCapture(job)
        self.assertEqual(7, cap.frame_count)
        self.assertFalse(cap.get_initialization_frames_if_available(100))

        min_x = ff_track.frame_locations[3].x_left_upper
        max_x = ff_track.frame_locations[
            7].x_left_upper + ff_track.frame_locations[7].width
        min_y = ff_track.frame_locations[3].y_left_upper
        max_y = ff_track.frame_locations[
            1].y_left_upper + ff_track.frame_locations[1].height
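        # For this track: min_x = 4, max_x = 5 + 8 = 13, min_y = 4, max_y = 5 + 10 = 15,
        # so the superset region is 9 pixels wide and 11 tall.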
        expected_size = mpf_util.Size(max_x - min_x, max_y - min_y)
        self.assertEqual(expected_size, cap.frame_size)

        self.assert_frame_read(cap, 1, expected_size, 0)
        self.assert_frame_read(cap, 3, expected_size, 1 / 7)
        self.assert_frame_read(cap, 7, expected_size, 2 / 7)
        self.assert_frame_read(cap, 11, expected_size, 3 / 7)
        self.assert_frame_read(cap, 12, expected_size, 4 / 7)
        self.assert_frame_read(cap, 20, expected_size, 5 / 7)
        self.assert_frame_read(cap, 25, expected_size, 6 / 7)

        self.assertAlmostEqual(1, cap.frame_position_ratio)
        self.assert_read_fails(cap)

        il = mpf.ImageLocation(0, 1, 2, 3)
        track = mpf.VideoTrack(0,
                               6,
                               frame_locations={
                                   1: il,
                                   2: il,
                                   4: il,
                                   5: il
                               })
        cap.reverse_transform(track)
        self.assertEqual(1, track.start_frame)
        self.assertEqual(25, track.stop_frame)
        self.assert_dict_contains_keys(track.frame_locations, (3, 7, 12, 20))
Example No. 12
    def get_detections_from_image_reader(image_job, image_reader):
        logger.info('[%s] Received image job: %s', image_job.job_name,
                    image_job)
        model = get_model(image_job)  # A real component would use the model.

        img = image_reader.get_image()

        height, width, _ = img.shape
        logger.info('[%s] Image at %s: width = %s, height = %s',
                    image_job.job_name, image_job.data_uri, width, height)

        detection_sz = 20
        yield mpf.ImageLocation(width // 2 - detection_sz, 0, detection_sz,
                                height - 1, -1.0, dict(METADATA='full_height'))

        yield mpf.ImageLocation(
            0, 0, width // 4, height // 4, -1,
            dict(
                METADATA='top left corner, .25 width and .25 height of image'))
Example No. 13
    def get_detections_from_video_capture(video_job, video_capture):
        logger.info('[%s] Received video job: %s', video_job.job_name,
                    video_job)
        model = get_model(video_job)  # A real component would use the model.

        width, height = video_capture.frame_size

        detections = dict()
        expand_rate = 5
        last_il = mpf.ImageLocation(0, 0, 1, 1)
        last_frame_read = 0
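        # Each frame's detection keeps (0, 0) as its top-left corner and grows by expand_rate
        # pixels in width and height per frame, capped just below the frame dimensions.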
        for idx, frame in enumerate(video_capture):
            last_frame_read = idx
            last_il = mpf.ImageLocation(
                0, 0, min(width - 1, last_il.width + expand_rate),
                min(height - 1, last_il.height + expand_rate))
            detections[idx] = last_il

        if not detections:
            return ()
        return [mpf.VideoTrack(0, last_frame_read, frame_locations=detections)]
Example No. 14
    def test_reverse_transform_with_flip(self):
        frame_width = 100
        frame_height = 200

        transformer = AffineFrameTransformer.rotate_full_frame(
            0, True, NoOpTransformer((frame_width, frame_height)))
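        # A rotation of 0 degrees with flip enabled mirrors the frame horizontally, so only
        # the x coordinate and the HORIZONTAL_FLIP property change on reverse transform.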

        # Test without existing flip
        detection = mpf.ImageLocation(10, 20, 40, 50)
        detection_reversed = mpf.ImageLocation(10, 20, 40, 50)
        transformer.reverse_transform(detection_reversed, 0)

        self.assertEqual(frame_width - detection.x_left_upper - 1,
                         detection_reversed.x_left_upper)
        self.assertEqual(detection.y_left_upper,
                         detection_reversed.y_left_upper)
        self.assertEqual(detection.width, detection_reversed.width)
        self.assertEqual(detection.height, detection_reversed.height)
        self.assertIn('HORIZONTAL_FLIP',
                      detection_reversed.detection_properties)
        self.assertTrue(
            mpf_util.get_property(detection_reversed.detection_properties,
                                  'HORIZONTAL_FLIP', False))

        # Test with existing flip
        detection = mpf.ImageLocation(10, 20, 40, 50, -1,
                                      dict(HORIZONTAL_FLIP='True'))
        detection_reversed = mpf.ImageLocation(10, 20, 40, 50, -1,
                                               dict(HORIZONTAL_FLIP='True'))
        transformer.reverse_transform(detection_reversed, 0)

        self.assertEqual(frame_width - detection.x_left_upper - 1,
                         detection_reversed.x_left_upper)
        self.assertEqual(detection.y_left_upper,
                         detection_reversed.y_left_upper)
        self.assertEqual(detection.width, detection_reversed.width)
        self.assertEqual(detection.height, detection_reversed.height)
        self.assertNotIn('HORIZONTAL_FLIP',
                         detection_reversed.detection_properties)
Example No. 15
    def get_detections_from_image_reader(self, image_job, image_reader):
        test = self._test

        image = image_reader.get_image()
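        # The test image is assumed to be 80 pixels wide and 100 tall, with black
        # top-left / bottom-right corners and white top-right / bottom-left corners.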
        top_left_corner = image[:20, :30]
        test.assertTrue(test_util.is_all_black(top_left_corner))
        bottom_right_corner = image[80:, 50:]
        test.assertTrue(test_util.is_all_black(bottom_right_corner))

        top_right_corner = image[:20, 50:]
        test.assertTrue(test_util.is_all_white(top_right_corner))
        bottom_left_corner = image[80:, :30]
        test.assertTrue(test_util.is_all_white(bottom_left_corner))

        for corner in (top_left_corner, bottom_right_corner, top_right_corner,
                       bottom_left_corner):
            test.assertEqual(mpf_util.Size(30, 20),
                             mpf_util.Size.from_frame(corner))

        yield mpf.ImageLocation(0, 0, 30, 20)
        yield mpf.ImageLocation(50, 80, 30, 20)
        yield mpf.ImageLocation(50, 0, 30, 20)
        yield mpf.ImageLocation(0, 80, 30, 20)
Example No. 16
    def _assert_reverse_transform(self, image_reader, pre_transform_values,
                                  post_transform_values):
        il = mpf.ImageLocation(*pre_transform_values)
        image_reader.reverse_transform(il)
        self.assertEqual(post_transform_values[0], il.x_left_upper)
        self.assertEqual(post_transform_values[1], il.y_left_upper)
        self.assertEqual(post_transform_values[2], il.width)
        self.assertEqual(post_transform_values[3], il.height)

        expected_rotation = post_transform_values[4] if len(
            post_transform_values) > 4 else 0
        actual_rotation = mpf_util.get_property(il.detection_properties,
                                                'ROTATION', 0.0)
        self.assertTrue(
            mpf_util.rotation_angles_equal(expected_rotation, actual_rotation))
Example No. 17
    def verify_correctly_rotated(self, file_name, feed_forward_detection):
        full_path = test_util.get_data_file_path('rotation/' + file_name)
        job = mpf.ImageJob('Test', full_path, dict(FEED_FORWARD_TYPE='REGION'),
                           {}, feed_forward_detection)
        image_reader = mpf_util.ImageReader(job)
        img = image_reader.get_image()

        height, width = img.shape[:2]

        self.assertEqual(feed_forward_detection.width, width)
        self.assertEqual(feed_forward_detection.height, height)

        self.assert_image_color(img, (255, 0, 0))

        detection = mpf.ImageLocation(0, 0, width, height)
        image_reader.reverse_transform(detection)
        self.assert_detections_same_location(detection, feed_forward_detection)
Example No. 18
    def test_rotation_threshold_with_feed_forward(self):
        test_img_path = test_util.get_data_file_path(
            'rotation/hello-world.png')
        original_img = cv2.imread(test_img_path)
        ff_img_loc = mpf.ImageLocation(0, 0, original_img.shape[1],
                                       original_img.shape[0], -1,
                                       dict(ROTATION='354.9'))

        job = mpf.ImageJob(
            'test', test_img_path,
            dict(ROTATION_THRESHOLD='5.12', FEED_FORWARD_TYPE='REGION'),
            dict(), ff_img_loc)
        img = mpf_util.ImageReader(job).get_image()
        self.assertTrue(np.array_equal(original_img, img))

        job.job_properties['ROTATION_THRESHOLD'] = '5.00'
        img = mpf_util.ImageReader(job).get_image()
        self.assertFalse(np.array_equal(original_img, img))
Example No. 19
    def test_can_use_search_region_with_feed_forward_frame_type(self):
        ff_track = mpf.VideoTrack(0, 15, frame_locations={1: mpf.ImageLocation(5, 5, 5, 5)})
        job_properties = dict(
            FEED_FORWARD_TYPE='FRAME',
            SEARCH_REGION_ENABLE_DETECTION='True',
            SEARCH_REGION_TOP_LEFT_X_DETECTION='3',
            SEARCH_REGION_TOP_LEFT_Y_DETECTION='3',
            SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='6',
            SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='8')

        job = mpf.VideoJob('TEST', FRAME_FILTER_TEST_VIDEO, 0, -1, job_properties, {}, ff_track)
        cap = mpf_util.VideoCapture(job)
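        # The search region spans x in [3, 6) and y in [3, 8), so each cropped frame is
        # 3 pixels wide and 5 pixels tall.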

        expected_size = mpf_util.Size(3, 5)
        self.assertEqual(expected_size, cap.frame_size)

        frame = next(cap)
        self.assertEqual(expected_size.width, frame.shape[1])
        self.assertEqual(expected_size.height, frame.shape[0])
Example No. 20
    def test_rotation_full_frame(self):
        frame_rotation = 15
        job = mpf.ImageJob(
            'test', test_util.get_data_file_path('rotation/hello-world.png'),
            dict(ROTATION=str(frame_rotation)), {})

        image_reader = mpf_util.ImageReader(job)
        image = image_reader.get_image()

        il = mpf.ImageLocation(0, 0, image.shape[1], image.shape[0])
        image_reader.reverse_transform(il)
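        # The rotated frame is larger than the source image, so reverse transforming its full
        # bounding box can yield coordinates outside the original image, including negative ones.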

        self.assertEqual(-141, il.x_left_upper)
        self.assertEqual(38, il.y_left_upper)
        self.assertEqual(image.shape[1], il.width)
        self.assertEqual(image.shape[0], il.height)
        self.assertNotIn('HORIZONTAL_FLIP', il.detection_properties)
        self.assertAlmostEqual(frame_rotation,
                               float(il.detection_properties['ROTATION']))
Example No. 21
    def test_feed_forward_cropper_crop_to_exact_region(self):
        ff_track = mpf.VideoTrack(4,
                                  29,
                                  frame_locations={
                                      4: mpf.ImageLocation(10, 60, 65, 125),
                                      15: mpf.ImageLocation(60, 20, 100, 200),
                                      29: mpf.ImageLocation(70, 0, 30, 240)
                                  })
        job = mpf.VideoJob('Test', FRAME_FILTER_TEST_VIDEO, 4, 29,
                           dict(FEED_FORWARD_TYPE='REGION'), {}, ff_track)
        cap = mpf_util.VideoCapture(job)
        output_track = mpf.VideoTrack(0, 2)

        frame_pos = cap.current_frame_position
        frame = next(cap)
        self.assertEqual(4, get_frame_number(frame))
        self.assertEqual((65, 125), mpf_util.Size.from_frame(frame))
        output_track.frame_locations[frame_pos] = mpf.ImageLocation(
            0, 0, frame.shape[1], frame.shape[0])

        frame_pos = cap.current_frame_position
        frame = next(cap)
        self.assertEqual(15, get_frame_number(frame))
        self.assertEqual((100, 200), mpf_util.Size.from_frame(frame))
        output_track.frame_locations[frame_pos] = mpf.ImageLocation(
            0, 0, frame.shape[1], frame.shape[0])

        frame_pos = cap.current_frame_position
        frame = next(cap)
        self.assertEqual(29, get_frame_number(frame))
        self.assertEqual((30, 240), mpf_util.Size.from_frame(frame))
        output_track.frame_locations[frame_pos] = mpf.ImageLocation(
            5, 40, 15, 60)
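        # This detection is relative to the cropped 30 x 240 feed-forward region; reverse_transform
        # later offsets it by that region's top-left corner (70, 0), yielding (75, 40, 15, 60).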

        self.assert_read_fails(cap)

        cap.reverse_transform(output_track)
        self.assertEqual(len(ff_track.frame_locations),
                         len(output_track.frame_locations))

        self.assertEqual(ff_track.frame_locations[4],
                         output_track.frame_locations[4])
        self.assertEqual(ff_track.frame_locations[15],
                         output_track.frame_locations[15])

        last_detection = output_track.frame_locations[29]
        self.assertEqual(75, last_detection.x_left_upper)
        self.assertEqual(40, last_detection.y_left_upper)
        self.assertEqual(15, last_detection.width)
        self.assertEqual(60, last_detection.height)
Example No. 22
    def test_search_region_with_non_orthogonal_rotation(self):
        job = mpf.ImageJob(
            'Test',
            test_util.get_data_file_path('rotation/20deg-bounding-box.png'),
            dict(ROTATION='20',
                 SEARCH_REGION_ENABLE_DETECTION='true',
                 SEARCH_REGION_TOP_LEFT_X_DETECTION='199',
                 SEARCH_REGION_TOP_LEFT_Y_DETECTION='245',
                 SEARCH_REGION_BOTTOM_RIGHT_X_DETECTION='299',
                 SEARCH_REGION_BOTTOM_RIGHT_Y_DETECTION='285'), {}, None)
        image_reader = mpf_util.ImageReader(job)
        img = image_reader.get_image()
        self.assert_image_color(img, (255, 0, 0))

        il = mpf.ImageLocation(0, 0, img.shape[1], img.shape[0])
        image_reader.reverse_transform(il)
        self.assertEqual(117, il.x_left_upper)
        self.assertEqual(218, il.y_left_upper)
        self.assertEqual(100, il.width)
        self.assertEqual(40, il.height)
        actual_rotation = mpf_util.get_property(il.detection_properties,
                                                'ROTATION', 0.0)
        self.assertTrue(mpf_util.rotation_angles_equal(20, actual_rotation))
Example No. 23
    def test_can_handle_rotated_detection_near_middle(self):
        self.verify_correctly_rotated(
            '20deg-bounding-box.png',
            mpf.ImageLocation(116, 218, 100, 40, -1, dict(ROTATION='20')))