Example #1
0
    def __init__(self, vision_communication, flight_communication, camera):
        """
        Wire up communication queues, the camera, and every vision algorithm.

        Parameters
        ----------
        vision_communication
            Queue used to send vision results to flight.
        flight_communication
            Queue used to receive state updates from flight.
        camera
            Iterable camera yielding (depth, color) frame pairs.
        """
        warnings.filterwarnings("ignore")

        ## Queues shared with the flight process.
        self.vision_communication = vision_communication
        self.flight_communication = flight_communication

        # Expose the camera as a frame iterator.
        self.camera = iter(camera)

        ## Locate the obstacle config relative to the working directory.
        if os.path.isdir("vision"):
            prefix = "vision"
        elif os.path.isdir("integration_tests"):
            prefix = "../vision"
        else:
            prefix = ""

        # Blob-detector parameters live in a json config file.
        config_path = os.path.join(prefix, "obstacle", "config.json")

        with open(config_path, "r") as handle:
            detector_config = json.load(handle)

        self.obstacle_finder = ObstacleFinder(params=import_params(detector_config))

        self.obstacle_tracker = ObstacleTracker()

        self.text_detector = TextDetector()

        self.module_location = ModuleLocation()

        self.vision_flags = Queue()
Example #2
0
class TimeObstacle:
    """
    Timing ObstacleFinder methods.
    """
    DEFAULT_DIMS = (1280, 720)
    DEFAULT_RADIUS = 100

    def setup(self):
        """
        Configure blob detector and initialize benchmark images.
        """
        ## Generate images
        self.PARAMETERS = {}
        self.PARAMETERS.update(common.blank_dimensions())

        base_color, base_depth = common.blank_dimensions(self.DEFAULT_DIMS)

        # One centered circle at several radii.
        for radius in (25, 50, 100, 250):
            color, depth = np.copy(base_color), np.copy(base_depth)

            cv2.circle(color, (640, 360), radius, (255, 255, 255), thickness=-1)
            cv2.circle(depth, (640, 360), radius, (255), thickness=-1)

            self.PARAMETERS[f'radius={radius}'] = (color, depth)

        # One to each corner
        corners = [(320, 180), (320, 540), (960, 180), (960, 540)]
        for n_obj in range(4):
            color, depth = np.copy(base_color), np.copy(base_depth)

            for location in corners[:n_obj]:
                cv2.circle(color, location, self.DEFAULT_RADIUS, (255, 255, 255), thickness=-1)
                cv2.circle(depth, location, self.DEFAULT_RADIUS, (255), thickness=-1)

            self.PARAMETERS[f'n_obj={n_obj}'] = (color, depth)

        # On default noise spectrum (circles drawn onto the noise images in place)
        for title, (color, depth) in common.noise().items():
            cv2.circle(color, (640, 360), self.DEFAULT_RADIUS, (255, 255, 255), thickness=-1)
            cv2.circle(depth, (640, 360), self.DEFAULT_RADIUS, (255), thickness=-1)

            self.PARAMETERS[f'{title} single'] = (color, depth)

        ## Read current params & setup obstacle detector
        prefix = '' if os.path.isdir("times") else '..'
        config_path = os.path.join(prefix, '..', 'obstacle', 'config.json')

        with open(config_path, 'r') as handle:
            config = json.load(handle)

        self.blob_finder = ObstacleFinder(params=import_params(config))

    def time_find(self, color_image, depth_image):
        """
        Time the ObstacleFinder.find function.
        """
        self.blob_finder.find(color_image, depth_image)
Example #3
0
    def setup(self) -> None:
        """
        Setup obstacle detector via json file.

        Returns
        -------
        None
        """
        # Detector parameters are stored under vision/obstacle/.
        path = os.path.join("vision", "obstacle", "config.json")

        with open(path, "r") as handle:
            config = json.load(handle)

        self.obstacle_finder = ObstacleFinder(params=import_params(config))
        self.obstacle_tracker = ObstacleTracker()
Example #4
0
class AccuracyObstacle:
    """
    Measuring accuracy of obstacle detection.
    """

    def __init__(self):
        self.setup()

    def setup(self) -> None:
        """
        Setup obstacle detector via json file.

        Returns
        -------
        None
        """
        config_filename = os.path.join("vision", "obstacle", "config.json")

        with open(config_filename, "r") as config_file:
            config = json.load(config_file)

        self.obstacle_finder = ObstacleFinder(params=import_params(config))
        self.obstacle_tracker = ObstacleTracker()

    def accuracy_find(self, color_image: np.ndarray, depth_image: np.ndarray) -> list:
        """
        Test accuracy of obstacle finder.

        Parameters
        ----------
        color_image: np.ndarray
            The color image.
        depth_image: np.ndarray
            The depth image.

        Returns
        -------
        list<BoundingBox>
            A list of detected obstacles.
        """
        return self.obstacle_finder.find(color_image, depth_image)

    def accuracy_track(self, new_obstacles: list) -> list:
        """
        Test accuracy of obstacle tracker.

        Parameters
        ----------
        new_obstacles: list<BoundingBox>
            List of newly detected obstacles.

        Returns
        -------
        list<BoundingBox>
            A list of obstacles tracked between frames.
        """
        self.obstacle_tracker.update(new_obstacle_boxes=new_obstacles)
        return self.obstacle_tracker.getPersistentObstacles()

    # Backward-compatible alias: the method was originally published under this
    # misspelled name, so existing callers must keep working.
    acuracy_track = accuracy_track
Example #5
0
    def __init__(self, vision_communication, flight_communication, camera):
        """
        Wire up communication queues, the camera, and the vision algorithms.
        """
        ## Queues shared with the flight process.
        self.vision_communication = vision_communication
        self.flight_communication = flight_communication

        # Expose the camera as a frame iterator.
        self.camera = iter(camera)

        ## Config lives under vision/ when run from the repo root.
        if os.path.isdir("vision"):
            prefix = 'vision'
        else:
            prefix = ''

        # Blob-detector parameters live in a json config file.
        config_path = os.path.join(prefix, 'obstacle', 'config.json')

        with open(config_path, 'r') as handle:
            detector_config = json.load(handle)

        self.obstacle_finder = ObstacleFinder(params=import_params(detector_config))

        self.module_location = ModuleLocation()
Example #6
0
    def setup(self):
        """
        Setup obstacle detector
        """
        # Path differs depending on whether we run from times/ or its parent.
        if os.path.isdir("times"):
            prefix = ''
        else:
            prefix = '..'

        path = os.path.join(prefix, '..', 'obstacle', 'config.json')

        with open(path, 'r') as handle:
            config = json.load(handle)

        self.blob_finder = ObstacleFinder(params=import_params(config))
Example #7
0
class Pipeline:
    """
    Pipeline to carry information from the cameras through
    the various vision algorithms out to flight.

    Parameters
    -------------
    vision_communication: multiprocessing Queue
        Interface to share vision information with flight.
    flight_communication: multiprocessing Queue
        Interface to receive flight state information from flight.
    camera: Camera
        Camera to pull image from.
    """
    # Seconds to wait on the outbound queue before results are irrelevant.
    PUT_TIMEOUT = 1

    def __init__(self, vision_communication, flight_communication, camera):
        ## Queues shared with the flight process.
        self.vision_communication = vision_communication
        self.flight_communication = flight_communication

        # Expose the camera as a frame iterator.
        self.camera = iter(camera)

        ## Config lives under vision/ when run from the repo root.
        prefix = 'vision' if os.path.isdir("vision") else ''

        # Blob-detector parameters live in a json config file.
        config_filename = os.path.join(prefix, 'obstacle', 'config.json')

        with open(config_filename, 'r') as config_file:
            config = json.load(config_file)

        self.obstacle_finder = ObstacleFinder(params=import_params(config))

        self.module_location = ModuleLocation()

    @property
    def picture(self):
        """Next (depth, color) frame pair from the camera."""
        return next(self.camera)

    def run(self, prev_state):
        """
        Process current camera frame.

        Parameters
        ----------
        prev_state
            State to reuse when flight has not published a new one.

        Returns
        -------
        The state that was processed this frame.
        """
        ## Grab the current frame and the latest flight state.
        depth_image, color_image = self.picture

        try:
            state = self.flight_communication.get_nowait()
        except Empty:
            state = prev_state

        ## Run the algorithm matching the current flight state.
        bboxes = []

        if state == 'early_laps':
            bboxes = self.obstacle_finder.find(color_image, depth_image)
        elif state == 'module_detection':
            self.module_location.setImg(color_image, depth_image)
            center = self.module_location.getCenter()
            depth = get_module_depth(depth_image, center)
            #orientation = get_module_orientation(region_of_interest(depth_image, depth, center), center)
            box = BoundingBox(getModuleBounds(color_image, center, depth),
                              ObjectType.MODULE)
            box.module_depth = depth  # float
            #box.orientation = orientation # tuple
            bboxes.append(box)
        else:
            pass  # raise AttributeError(f"Unrecognized state: {state}")

        ## Publish results to flight.
        # BUGFIX: timeout must be passed by keyword — Queue.put's second
        # positional parameter is `block`, so the old positional call applied
        # no timeout at all.
        self.vision_communication.put((datetime.datetime.now(), bboxes),
                                      timeout=self.PUT_TIMEOUT)

        # from vision.common.blob_plotter import plot_blobs
        # plot_blobs(self.obstacle_finder.keypoints, color_image)

        return state
Example #8
0
    def test_find(self):
        """
        Testing ObstacleFinder.find.

        Parameters
        ----------
        image: ndarray

        Settings
        --------
        _params: cv2.SimpleBlobDetector_Params
            Parameters for blob_detector.
        blob_detector: cv2.SimpleBlobDetector_create(self.params)
            Blob detection algorithm.

        Returns
        -------
        list[BoundingBox]
        """
        ### Ensure range of Parameters work
        IMAGE_SIZE = (1920, 1080)

        ## 3 Channel {0..255} Image
        # Smoke test: find() must accept a random uint8 color image without a
        # depth image (None) and not raise.
        with self.subTest(i="3 Channel {0..255} Image"):
            detector = ObstacleFinder(params=self._get_params())

            color_image = np.random.randint(0, 255, size=(*IMAGE_SIZE, 3), dtype='uint8')

            detector.find(color_image, None)

        ### Ensure returns correct type of BoundingBox
        with self.subTest(i="Object type"):
            detector = ObstacleFinder(params=self._get_params())

            # Draw progressively larger rectangles until the detector reports
            # something; the for/else fails the test only if no size works.
            # NOTE(review): the break condition is `output is not None` — if
            # find() returns an empty list (not None) on a miss, this breaks on
            # the first iteration; confirm find()'s miss behavior.
            for i in range(1, 9):
                color_image = np.zeros((1000, 1000, 3), dtype='uint8')
                cv2.rectangle(color_image, (200, 200), (200 + (i * 100), 200 + (i * 100)), (255, 255, 255), 2)

                output = detector.find(color_image, None)

                if output is not None:
                    break
            else:
                self.fail(msg="Failed to detect obstacle to be able to test.")

            for box in output:
                self.assertIsInstance(box, BoundingBox)

                ## Boxes are quads (4 vertices) or 3D boxes (8 vertices).
                self.assertIn(len(box.vertices), [4, 8])

                X, Y = [], []
                for vertex in box.vertices:
                    self.assertIsInstance(vertex, tuple)

                    X.append(vertex[0])
                    Y.append(vertex[1])

                # Detected box must have a non-trivial extent in both axes.
                self.assertGreater(max(X) - min(X), 50)
                self.assertGreater(max(Y) - min(Y), 50)

                ## Obstacles are labeled as objects to avoid.
                self.assertIsInstance(box.object_type, ObjectType)
                self.assertEqual(box.object_type, ObjectType.AVOID)

        ## Ensure does not modify original image
        # NOTE: reuses `i` leaked from the loop above (the size that detected),
        # so this rectangle matches the last one drawn in the subTest.
        detector = ObstacleFinder(params=self._get_params())

        color_image = np.zeros((1000, 1000, 3), dtype='uint8')
        cv2.rectangle(color_image, (200, 200), (200 + (i * 100), 200 + (i * 100)), (255, 255, 255), 2)

        color_parameter = np.copy(color_image)

        detector.find(color_parameter, None)

        np.testing.assert_array_equal(color_image, color_parameter)
Example #9
0
class Pipeline:
    """
    Pipeline to carry information from the cameras through
    the various vision algorithms out to flight.

    Parameters
    -------------
    vision_communication: multiprocessing Queue
        Interface to share vision information with flight.
    flight_communication: multiprocessing Queue
        Interface to receive flight state information from flight.
    camera: Camera
        Camera to pull image from.
    """

    # Seconds to wait on the outbound queues before results are irrelevant.
    PUT_TIMEOUT = 1

    def __init__(self, vision_communication, flight_communication, camera):
        warnings.filterwarnings("ignore")
        ## Queues shared with the flight process.
        self.vision_communication = vision_communication
        self.flight_communication = flight_communication

        # Expose the camera as a frame iterator.
        self.camera = camera.__iter__()

        ## Locate the obstacle config relative to the working directory.
        if os.path.isdir("vision"):
            prefix = "vision"
        elif os.path.isdir("integration_tests"):
            prefix = "../vision"
        else:
            prefix = ""

        # Blob-detector parameters live in a json config file.
        config_filename = os.path.join(prefix, "obstacle", "config.json")

        with open(config_filename, "r") as config_file:
            config = json.load(config_file)

        self.obstacle_finder = ObstacleFinder(params=import_params(config))

        self.obstacle_tracker = ObstacleTracker()

        self.text_detector = TextDetector()

        self.module_location = ModuleLocation()

        self.vision_flags = Queue()

    @property
    def picture(self):
        """Next (depth, color) frame pair from the camera."""
        return next(self.camera)

    async def run(self, prev_state):
        """
        Process current camera frame.

        Each algorithm call is individually guarded; failures are recorded in
        a FailureFlags object and published alongside the results rather than
        crashing the pipeline.

        Parameters
        ----------
        prev_state
            State to reuse when flight has not published a new one.

        Returns
        -------
        The state that was processed this frame.
        """
        ## Grab the current frame and the latest flight state.
        depth_image, color_image = self.picture

        try:
            state = self.flight_communication.get_nowait()
        except Empty:
            state = prev_state
        ##

        bboxes = []

        flags = FailureFlags()

        # BUGFIX (throughout): `except Exception:` instead of bare `except:`
        # so KeyboardInterrupt/SystemExit still propagate.
        if state == "early_laps":  # navigation around the pylons
            flags = ObstacleDetectionFlags()

            try:
                bboxes = self.obstacle_finder.find(color_image, depth_image)
            except Exception:
                flags.obstacle_finder = False

            if flags.obstacle_finder:
                try:
                    self.obstacle_tracker.update(bboxes)
                    bboxes = self.obstacle_tracker.get_persistent_obstacles()
                except Exception:
                    flags.obstacle_tracker = False

        elif state == "text_detection":  # approaching mast
            flags = TextDetectionFlags()

            try:
                bboxes = self.text_detector.detect_russian_word(
                    color_image, depth_image)
            except Exception:
                flags.detect_russian_word = False

        elif state == "module_detection":  # locating module
            flags = ModuleDetectionFlags()
            try:
                self.module_location.set_img(color_image, depth_image)
            except Exception:
                flags.set_img = False

            if flags.set_img:
                try:
                    bboxes.extend(
                        self.text_detector.detect_russian_word(
                            color_image, depth_image))
                except Exception:
                    flags.detect_russian_word = False

            if flags.detect_russian_word:
                try:
                    self.module_location.set_text(bboxes)
                except Exception:
                    flags.set_text = False

            if flags.set_img:
                try:
                    in_frame = self.module_location.is_in_frame()
                except Exception:
                    in_frame = False
                    flags.is_in_frame = False

                # only do more calculation if module is in the image
                if in_frame:
                    # default values for bounding box construction
                    center = (0, 0)
                    roll = 0
                    depth = 0
                    region = np.empty(1)
                    orientation = (0, 0)
                    bounds = np.empty(1)

                    try:
                        center = (self.module_location.get_center()
                                  )  # center of module in image
                    except Exception:
                        flags.get_center = False
                    if flags.get_center:
                        try:
                            depth = get_module_depth(
                                depth_image,
                                center)  # depth of center of module
                        except Exception:
                            flags.get_module_depth = False

                        if flags.get_module_depth:
                            try:
                                region = get_region_of_interest(
                                    depth_image, depth, center
                                )  # depth image sliced on underestimate bounds
                            except Exception:
                                flags.get_region_of_interest = False

                            if flags.get_region_of_interest:
                                try:
                                    orientation = get_module_orientation(
                                        region)  # x and y tilt of module
                                except Exception:
                                    flags.get_module_orientation = False

                            try:
                                bounds = get_module_bounds(
                                    np.shape(color_image)[:2], center,
                                    depth)  # overestimate of bounds
                            except Exception:
                                flags.get_module_bounds = False

                            if flags.get_module_bounds:
                                try:
                                    roll = get_module_roll(
                                        color_image[
                                            bounds[0][1]:bounds[3][1],
                                            bounds[0][0]:bounds[2][0], :, ]
                                    )  # roll of module
                                except Exception:
                                    flags.get_module_roll = False
                                # construct boundingbox for the module
                                box = BoundingBox(bounds, ObjectType.MODULE)
                                box.module_depth = depth  # float
                                box.orientation = orientation + (
                                    roll, )  # x, y, z tilt
                                bboxes.append(box)

        else:
            pass  # raise AttributeError(f"Unrecognized state: {state}")

        # Renamed from `time` to avoid shadowing the stdlib module name.
        timestamp = datetime.datetime.now()

        # You need to index vision_flags to see output, "self.vision_flags.get()[1]"
        # BUGFIX: timeout must be a keyword — Queue.put's second positional
        # parameter is `block`, so the old positional call applied no timeout.
        self.vision_flags.put((timestamp, flags), timeout=self.PUT_TIMEOUT)
        ## Yield to the event loop, then publish results to flight.
        await asyncio.sleep(0.001)
        self.vision_communication.put((timestamp, bboxes), timeout=self.PUT_TIMEOUT)
        # uncomment to visualize blobs
        # from vision.common.blob_plotter import plot_blobs
        # plot_blobs(self.obstacle_finder.keypoints, color_image)

        return state
Example #10
0
            # Press esc or 'q' to close the image window
            if key == ord('q') or key == 27 or cv2.getWindowProperty(
                    'Depth/Color Stream', 0) == -1:
                cv2.destroyAllWindows()
                break


if __name__ == '__main__':
    ## Display algorithm output on simulator
    import json
    from vision.obstacle.obstacle_finder import ObstacleFinder
    from vision.common.import_params import import_params
    # Hoisted out of the frame loop: importing once is sufficient.
    from vision.common.blob_plotter import plot_blobs

    camera = SimCamera()

    # Config lives under vision/ when launched from the repo root.
    prefix = 'vision' if os.path.isdir("vision") else ''
    config_filename = os.path.join(prefix, 'obstacle', 'config.json')

    with open(config_filename, 'r') as config_file:
        config = json.load(config_file)

    obstacle_finder = ObstacleFinder(params=import_params(config))

    for depth_image, color_image in camera:
        # Detect obstacles in the current frame and visualize them.
        # (Removed a dead `bboxes = []` that was immediately overwritten.)
        bboxes = obstacle_finder.find(color_image, depth_image)

        plot_blobs(obstacle_finder.keypoints, color_image)