def do_image(self, im, blobs):
        if blobs is not None:
            x1, y1, w1, h1 = cv2.boundingRect(blobs[0])

            area = w1 * h1
            if (area > self.min_area) and (area < self.max_area):
                if self.verbose:
                    print("[Goal] x: %d, y: %d, w: %d, h: %d, total "
                          "area: %d" % (x1, y1, w1, h1, area))

                offset_x, offset_y = cv_utils.process_image(im, x1, y1, w1, h1)

                self.network.send({
                    "found": True,
                    "offset_x": offset_x,
                    "offset_y": offset_y
                })

                if self.display:
                    # Draw image details
                    im = cv_utils.draw_images(im, x1, y1, w1, h1)

                    return im
            else:
                self.network.send_new({"found": False})
        else:
            self.network.send_new({"found": False})

        return im
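The cv_utils.process_image helper used above is not shown in any of these examples. As a rough sketch, assuming it simply returns the pixel offset of the bounding box's center from the image center (the body below is an assumption, not the project's actual helper):

def process_image(im, x, y, w, h):
    # Hypothetical sketch of cv_utils.process_image: return the (x, y) offset
    # of the bounding box's center from the image center, in pixels.
    im_height, im_width = im.shape[:2]

    # Center of the bounding box
    box_center_x = x + w / 2
    box_center_y = y + h / 2

    # Offset of the box center from the image center
    offset_x = box_center_x - im_width / 2
    offset_y = box_center_y - im_height / 2

    return offset_x, offset_y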
Example #2
    def run_image(self):
        if self.verbose:
            print("Image path specified, reading from %s" % self.image)

        im = cv2.imread(self.image)

        im_rect, im_mask = cv_utils.draw_images(im, self.lower, self.upper,
                                                self.min_area)

        if self.display:
            # Show the images
            cv2.imshow("Original", im_rect)
            cv2.imshow("Mask", im_mask)

            cv2.waitKey(0)

            cv2.destroyAllWindows()
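Here cv_utils.draw_images takes a color range and a minimum area and returns both an annotated image and the mask. The real helper is not shown; a minimal sketch with that signature might look like the following (OpenCV 4 contour API assumed):

import cv2


def draw_images(im, lower, upper, min_area):
    # Hypothetical sketch of cv_utils.draw_images: threshold the image, find
    # contours, and draw rectangles around blobs larger than min_area.
    im_mask = cv2.inRange(im, lower, upper)

    # OpenCV 4 returns (contours, hierarchy)
    contours, _ = cv2.findContours(im_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)

    im_rect = im.copy()
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        if w * h >= min_area:
            cv2.rectangle(im_rect, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return im_rect, im_mask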
Example #3
    def run_image(self):
        if self.verbose:
            print("Image path specified, reading from %s" % self.image)

        im = cv2.imread(self.image)
        # Fall back to showing the raw image if no blob passes the area check
        im_rect = im

        blob, im_mask = cv_utils.get_blob(im, self.lower, self.upper)
        if blob is not None:
            x1, y1, w1, h1 = cv2.boundingRect(blob[0])
            x2, y2, w2, h2 = cv2.boundingRect(blob[1])

            if w1 * h1 > self.min_area and w2 * h2 > self.min_area:
                if self.verbose:
                    print(
                        "[Blob 1] x: %d, y: %d, width: %d, height: %d, area: %d"
                        % (x1, y1, w1, h1, w1 * h1))
                    print(
                        "[Blob 2] x: %d, y: %d, width: %d, height: %d, area: %d"
                        % (x2, y2, w2, h2, w2 * h2))

                im_rect = cv_utils.draw_images(im, x1, y1, w1, h1, False)

                # Pass the midpoint of the two bounding boxes to process_image
                offset_x, offset_y = cv_utils.process_image(
                    im, (x1 + x2) / 2, (y1 + y2) / 2, (w1 + w2) / 2,
                    (h1 + h2) / 2)

                print(offset_x)
                print(offset_y)

                nt_utils.put_number("offset_x", offset_x)
                nt_utils.put_number("offset_y", offset_y)
        else:
            if self.verbose:
                print("No largest blob was found")

        if self.display:
            # Show the images
            if blob is not None:
                cv2.imshow("Original", im_rect)
                cv2.imshow("Mask", im_mask)
            else:
                cv2.imshow("Original", im)

            cv2.waitKey(0)

            cv2.destroyAllWindows()
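cv_utils.get_blob is likewise not shown. Since the caller indexes blob[0] and blob[1], it presumably returns the detected contours sorted by size along with the mask; a sketch under that assumption:

import cv2


def get_blob(im, lower, upper):
    # Hypothetical sketch of cv_utils.get_blob: threshold the image and return
    # the contours sorted by area (largest first) plus the mask, or None when
    # fewer than two blobs are found.
    im_mask = cv2.inRange(im, lower, upper)

    contours, _ = cv2.findContours(im_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)

    if len(contours) < 2:
        return None, im_mask

    blobs = sorted(contours, key=cv2.contourArea, reverse=True)
    return blobs, im_mask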
Example #4
    def run_video(self):
        camera = cv2.VideoCapture(0)

        if self.verbose:
            print("No image path specified, reading from camera video feed")

        timeout = 0

        while (True):
            # Read image from file
            (ret, im) = camera.read()

            if ret:
                im_rect, im_mask = cv_utils.draw_images(
                    im, self.lower, self.upper, self.min_area)
                offset_x, offset_y = cv_utils.process_image(
                    im, self.lower, self.upper, self.min_area)

                nt_utils.put_number("offset_x", offset_x)
                nt_utils.put_number("offset_y", offset_y)

                if self.display:
                    # Show the images
                    cv2.imshow("Original", im_rect)
                    cv2.imshow("Mask", im_mask)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
            else:
                if (timeout == 0):
                    print("No camera detected")

                timeout += 1

                if (timeout > 500):
                    print("Camera search timed out")
                    break

        camera.release()
        cv2.destroyAllWindows()
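The nt_utils calls push the computed offsets to NetworkTables for the robot code to consume. Assuming the module is a thin wrapper around pynetworktables (the table name and server address below are placeholders, not values from the examples), it might look roughly like:

from networktables import NetworkTables

# Hypothetical nt_utils-style wrapper around pynetworktables
NetworkTables.initialize(server="roborio-0000-frc.local")  # placeholder address
table = NetworkTables.getTable("vision")  # placeholder table name


def put_number(key, value):
    table.putNumber(key, value)


def get_number(key, default=0.0):
    return table.getNumber(key, default)


def put_boolean(key, value):
    table.putBoolean(key, value)


def get_boolean(key, default=False):
    return table.getBoolean(key, default)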
Example #5
    def do_image(self, im):
        # Get all the contours in the image and the mask used to find them
        blobs, mask = cv_utils.get_blobs(im, self.settings['lower'],
                                         self.settings['upper'])

        # Don't process any blobs if vision isn't locked
        if not self.locked:
            self.tracker.deregister_all()
            return im, mask

        found_blob = False

        # Create list of rectangles that bound the blob
        bounding_rects = [cv2.boundingRect(blob) for blob in blobs]

        # Sort list of blobs by x-value and zip them together with their corresponding bounding rectangle
        sorted_blobs = sorted(zip(bounding_rects, blobs),
                              key=lambda x: x[0],
                              reverse=True)

        goals = []
        prev_target = None
        prev_blob = None
        for bounding_rect, blob in sorted_blobs:
            if blob is not None and mask is not None:
                # Get the location and size of the rectangle bounding the contour
                x, y, w, h = bounding_rect

                # Skip over the blob if its area is too small
                if w * h < self.settings['min_area']:
                    continue

                # Returns a rectangle of minimum area that bounds the blob (accounts for blobs at an angle)
                target = cv2.minAreaRect(blob)
                # Gets the coordinates of the 4 corners of the rectangle bounding the blob
                box = np.int0(cv2.boxPoints(target))

                # Straighten box to have a top-down view and get the transformed width and height
                transformed_box = cv_utils.four_point_transform(mask, box)
                # shape is (rows, cols), i.e. (height, width)
                height, width = transformed_box.shape

                # Calculate the proportion of the transformed box that's filled up by the blob
                full = cv_utils.get_percent_full(transformed_box)
                area = width * height

                # Check to make sure the box is sufficiently filled
                if self.settings['min_area'] <= area <= self.settings[
                        'max_area'] and self.settings[
                            'min_full'] <= full <= self.settings['max_full']:
                    if self.verbose:
                        print(
                            '[Goal] x: %d, y: %d, w: %d, h: %d, area: %d, full: %f, angle: %f'
                            % (x, y, width, height, area, full, target[2]))

                    if self.display:
                        # Draw rectangles around goals and points on the center of the goal
                        im = cv_utils.draw_images(im, target, box)

                    if prev_target is not None:
                        angle_diff = abs(prev_target[2]) - abs(target[2])
                        # Track both the left and right sides of the vision target
                        if angle_diff < 0:
                            goals.append(
                                ((prev_target, target), (prev_blob, blob)))

                    # Left vision tape
                    prev_target = target
                    prev_blob = blob

        if len(goals) > 0:
            goal_centers = None

            try:
                # Calculate robot angle, target angle, x distance, y distance, distance, and centroid
                goal_centers = [
                    cv_utils.process_image(im, goal[0], goal[1])
                    for goal in goals
                ]
            except Exception:
                pass

            if goal_centers is None:
                return im, mask

            # Zip goal centers with their corresponding goals and sort by the angle difference between robot and target
            possible_goals = sorted(zip(goal_centers, goals),
                                    key=lambda x: abs(x[0][0] + x[0][1]))

            objects = self.tracker.update(
                [centers[5] for centers, _ in possible_goals])

            centers = None
            goal = None

            # Check if it's the first time locking onto the particular goal
            if self.lock_id is None:
                # Find the goal closest to where the robot is facing
                probable_goal = possible_goals[0]

                centers, goal = probable_goal
                # Assign an id to the goal based on the location of the centroid
                for index in objects:
                    if centers[5] == objects[index]:
                        self.lock_id = index
                        break
            else:
                try:
                    # Find the tracked goal that corresponds to the lock id
                    centers, goal = next(
                        filter(
                            lambda goal: goal[0][5] == objects[self.lock_id],
                            possible_goals))
                except Exception:
                    print('Exception while finding goal with lock id')

            if centers is not None:
                robot_angle, target_angle, x_distance, y_distance, distance, _ = centers

                # Put calculations on NetworkTables
                put('distance', distance)
                put('x_distance', x_distance)
                put('y_distance', y_distance)
                put('robot_angle', robot_angle)
                put('target_angle', target_angle)

                found_blob = True

        put('found', found_blob)

        # Send the data to NetworkTables
        flush()

        return im, mask
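This example additionally relies on cv_utils.four_point_transform and cv_utils.get_percent_full, neither of which is shown. The transform follows the common four-point perspective-warp pattern; the versions below are a sketch of that pattern plus a plausible fill-percentage calculation, not the project's actual code:

import cv2
import numpy as np


def order_points(pts):
    # Order corners as top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect


def four_point_transform(image, pts):
    # Warp the quadrilateral defined by pts to a straight-on, top-down view
    rect = order_points(pts.astype("float32"))
    tl, tr, br, bl = rect

    width = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    height = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))

    dst = np.array([[0, 0], [width - 1, 0],
                    [width - 1, height - 1], [0, height - 1]],
                   dtype="float32")

    matrix = cv2.getPerspectiveTransform(rect, dst)
    return cv2.warpPerspective(image, matrix, (width, height))


def get_percent_full(transformed_box):
    # Fraction of the warped mask region covered by the blob's white pixels
    return cv2.countNonZero(transformed_box) / float(transformed_box.size)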
Example #6
    def run_video(self):
        camera = WebcamVideoStream(src=self.source).start()

        if self.verbose:
            print("No image path specified, reading from camera video feed")

        timeout = 0

        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        if self.output_file:
            # The 30.0 argument is the recording frame rate (FPS)
            videoWrite = cv2.VideoWriter(self.output_file, fourcc, 30.0,
                                         (640, 480))
        while True:
            if nt_utils.get_boolean("shutdown"):
                os.system("shutdown -H now")
                return

            im = camera.read()
            try:
                lowerThreshold = np.array([
                    nt_utils.get_number("front_lower_blue"),
                    nt_utils.get_number("front_lower_green"),
                    nt_utils.get_number("front_lower_red")
                ])
                upperThreshold = np.array([
                    nt_utils.get_number("front_upper_blue"),
                    nt_utils.get_number("front_upper_green"),
                    nt_utils.get_number("front_upper_red")
                ])
            except Exception:
                lowerThreshold = self.lower
                upperThreshold = self.upper
            print(upperThreshold, lowerThreshold)

            if im is not None:
                im = cv2.resize(im, (640, 480), 0, 0)
                try:
                    blob, im_mask = cv_utils.get_blob(im, lowerThreshold,
                                                      upperThreshold)
                except TypeError:
                    blob, im_mask = cv_utils.get_blob(im, self.lower,
                                                      self.upper)
                if blob is not None:
                    x1, y1, w1, h1 = cv2.boundingRect(blob[0])
                    x2, y2, w2, h2 = cv2.boundingRect(blob[1])

                    area1 = w1 * h1
                    area2 = w2 * h2
                    totalArea = area1 + area2
                    if (totalArea > self.min_area) and (totalArea <
                                                        self.max_area):
                        if self.verbose:
                            print(
                                "[Blob] x: %d, y: %d, width: %d, height: %d, total area: %d"
                                % (x1, y1, w1, h1, totalArea))

                        offset_x, offset_y = cv_utils.process_image(
                            im, x1, y1, w1, h1, x2, y2, w2, h2)

                        nt_utils.put_number("offset_x", offset_x)
                        nt_utils.put_number("offset_y", offset_y)
                        nt_utils.put_boolean("blob_found", True)
                        nt_utils.put_number("blob1_size", w1 * h1)
                        nt_utils.put_number("blob2_size", w2 * h2)
                    else:
                        nt_utils.put_boolean("blob_found", False)

                    if self.display:
                        # Draw image details
                        im = cv_utils.draw_images(im, x1, y1, w1, h1, True)
                        im = cv_utils.draw_images(im, x2, y2, w2, h2, False)

                        # Show the images
                        cv2.imshow("Original", im)
                        cv2.imshow("Mask", im_mask)
                else:
                    nt_utils.put_boolean("blob_found", False)

                    if self.verbose:
                        print("No largest blob was found")

                    if self.display:
                        cv2.imshow("Original", im)

                # Write to video file
                if self.output_file:
                    videoWrite.write(im)

                if cv2.waitKey(1) & 0xFF == ord("q"):
                    break
            else:
                if (timeout == 0):
                    print("No camera detected")

                timeout += 1

                if (timeout > 500):
                    print("Camera search timed out")
                    break

        cv2.destroyAllWindows()
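WebcamVideoStream here matches the interface of the threaded camera reader from imutils (imutils.video.WebcamVideoStream). If that dependency is unavailable, a minimal stand-in with the same start()/read() interface could look like this sketch:

import threading

import cv2


class WebcamVideoStream:
    # Minimal threaded camera reader; a stand-in sketch, not the class the
    # example actually imports.
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        # Grab frames on a background thread so read() never blocks the loop
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()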