Example #1
0
    def find(self, image):
        """
        Uses neural net for finding traffic light on given image.
        Only the best traffic light detection is added to storage.
        Code inspired by: https://medium.com/@WuStangDan/step-by-step-tensorflow-object-detection-api-tutorial-part-5-saving-and-deploying-a-model-8d51f56dbcf1

        :param image: selected image
        """

        # The detection graph expects a batch dimension, so wrap the single image.
        img_expanded = np.expand_dims(image, axis=0)

        (boxes, scores, classes) = self.sess.run(
            [self.d_boxes, self.d_scores, self.d_classes],
            feed_dict={self.image_tensor: img_expanded})

        # Iterate zip lazily - no need to materialize a list first.
        # Detections come sorted by score, so stop at the first sub-threshold one.
        for box, score, class_id in zip(boxes[0], scores[0], classes[0]):

            if score < constants.DETECTOR_LIGHT_MINIMAL_SCORE:
                break

            if class_id == constants.TRAFFIC_LIGHT_CLASS_ID:
                # Box anchors come normalized in (y_min, x_min, y_max, x_max) order.
                y_min, x_min, y_max, x_max = box

                top_left = Coordinates(x_min, y_min, info=self._info)
                bottom_right = Coordinates(x_max, y_max, info=self._info)

                self.add_traffic_light(top_left=top_left, bottom_right=bottom_right)
Example #2
0
    def coordinates(self):
        """
        :return: Coordinates of vanishing point
        :raise VanishingPointError if vanishing point is defined by angle rather than point
        """

        point = self.point
        return Coordinates(*point)
Example #3
0
    def extract_flow(self, old_positions, new_positions) -> (float, float):
        """
        Extracts optical flow for this tracked object. Optical flow is selected using an area around center point of
        this tracked object. The returned flow is the average of all flow
        vectors whose new position lies inside this object's area.

        :param old_positions: iterable of (x, y) feature positions on the previous frame
        :param new_positions: iterable of (x, y) feature positions on the current frame
        :return: extracted (dx, dy) flow corresponding to this tracked object,
                 or (0, 0) when no flow vector falls inside the object's area
        """

        total_dx = 0
        total_dy = 0
        number_of_flows = 0

        # Unpack each (new, old) pair directly instead of indexing a temporary.
        for new_pos, old_pos in zip(new_positions, old_positions):

            # Only flows whose endpoint lies inside this object contribute.
            if self.in_area(Coordinates(*new_pos)):
                new_x, new_y = new_pos
                old_x, old_y = old_pos

                total_dx += new_x - old_x
                total_dy += new_y - old_y

                number_of_flows += 1

        if number_of_flows:
            return total_dx / number_of_flows, total_dy / number_of_flows
        else:
            return 0, 0
Example #4
0
    def select_manually(self, image):
        """
        Allows user to select traffic light position manually by drawing
        a rectangle over the displayed image.

        :param image: image to select traffic light on
        """

        # selectROI returns (x, y, width, height) of the drawn rectangle.
        x, y, width, height = cv2.selectROI("select_traffic_light", image, showCrosshair=True)
        cv2.destroyWindow("select_traffic_light")

        self.add_traffic_light(top_left=Coordinates(x, y),
                               bottom_right=Coordinates(x + width, y + height))
Example #5
0
    def front_point(self):
        """
        :return: coordinates of front point - center of bounding box top line
        """

        middle_x = (self._top_left[0] + self._bottom_right[0]) / 2
        return Coordinates(middle_x, self._top_left[1])
Example #6
0
    def tracker_point(self):
        """
        :return: coordinates of tracker point - center of bounding box bottom line
        """

        middle_x = (self._top_left[0] + self._bottom_right[0]) / 2
        return Coordinates(middle_x, self._bottom_right[1])
Example #7
0
    def center_point(self):
        """
        :return: center point of bounding box
        """

        mid_x = (self._top_left[0] + self._bottom_right[0]) / 2
        mid_y = (self._top_left[1] + self._bottom_right[1]) / 2

        return Coordinates(mid_x, mid_y)
Example #8
0
    def right_bot_anchor(self):
        """
        :return: right bot anchor of tracked object
        """

        half_width = self.size.width / 2
        half_height = self.size.height / 2

        return Coordinates(int(self.center.x + half_width),
                           int(self.center.y + half_height))
Example #9
0
    def left_top_anchor(self):
        """
        :return: top left anchor of tracked object
        """

        half_width = self.size.width / 2
        half_height = self.size.height / 2

        return Coordinates(int(self.center.x - half_width),
                           int(self.center.y - half_height))
Example #10
0
    def __init__(self, video_path, light_detection_model, program_arguments):
        """
        Sets up repositories, calibration defaults and tracker areas for the
        given input video, then applies the supplied program arguments.

        :param video_path: input video path
        :param light_detection_model: path to the light detection model
        :param program_arguments: instance of Parser class containing program arguments
        """

        super().__init__(video_path)

        # Detected vanishing points accumulate here during calibration.
        self._vanishing_points = []
        self._traffic_lights_repository = TrafficLightsRepository(
            model=light_detection_model, info=self)
        self._corridors_repository = TrafficCorridorRepository(self)

        # Vanishing-point detection is enabled and calibration is automatic
        # by default; _solve_program_arguments below may override these.
        self._detect_vanishing_points = True
        self._calibration_mode = CalibrationMode.AUTOMATIC

        # Area covering roughly the lower part of the frame (its top edge
        # slopes from height/3 on the left to height/2 on the right);
        # presumably where new trackers may start - confirm against usage.
        self._tracker_start_area = Area(
            info=self,
            top_left=Coordinates(0, self.height / 3),
            top_right=Coordinates(self.width, self.height / 2),
            bottom_right=Coordinates(self.width, self.height),
            bottom_left=Coordinates(0, self.height))

        # Larger area: everything below one quarter of the frame height;
        # presumably where existing trackers keep being updated.
        self._tracker_update_area = Area(
            info=self,
            top_left=Coordinates(0, self.height / 4),
            top_right=Coordinates(self.width, self.height / 4),
            bottom_right=Coordinates(self.width, self.height),
            bottom_left=Coordinates(0, self.height))

        # solve given program arguments
        self._solve_program_arguments(program_arguments)

        print(
            f"INFO: fps: {self.fps}, height: {self.height}, width: {self.width}, frame count: {self.frame_count}"
        )
Example #11
0
File: detector.py  Project: Karpisek/BP
    def convert_box_to_centroid_object(self, box) -> (Coordinates, ObjectSize):
        """
        Converts a bounding box defined by its top-left and bottom-right
        anchors into one defined by a middle point plus width and height.

        :param box: bounding box to convert, as (y_min, x_min, y_max, x_max)
        :return: converted bounding box as (center, size)
        """

        y_min, x_min, y_max, x_max = box

        # Center is the midpoint of each axis.
        center = Coordinates((x_min + x_max) / 2,
                             (y_min + y_max) / 2,
                             info=self._info)

        # Size is simply the extent along each axis.
        size = ObjectSize(x_max - x_min,
                          y_max - y_min,
                          info=self._info)

        return center, size
Example #12
0
    def _detect_second_vanishing_point(self, new_frame, boxes_mask,
                                       boxes_mask_no_border,
                                       light_status) -> None:
        """
        Calculates second vanishing point using information about car positions and detection of edges supporting
        second vanishing point. It is being detected only if green light status is present on current frame.
        Detected lines from edges are accumulated into parallel coordinate space - RANSAC algorithm is used
        for intersection detection - Vanishing Point.


        After vanishing point is being found it propagates this information to InputInfo where it adds VanishingPoint
        to corresponding list

        :param new_frame: examined frame
        :param boxes_mask: mask used for selecting parts of image where cars exists
        :param boxes_mask_no_border: mask used for selecting parts of image where cars exists
        :param light_status: current light status
        """

        # Skip detection entirely during red / red-orange phases (cars stand still).
        if light_status in [Color.RED, Color.RED_ORANGE]:
            return

        # Keep only image regions covered by detected car boxes.
        selected_areas = cv2.bitwise_and(
            new_frame, cv2.cvtColor(boxes_mask, cv2.COLOR_GRAY2RGB))
        blured = cv2.GaussianBlur(selected_areas, (7, 7), 0)

        canny = cv2.Canny(blured, 50, 150, apertureSize=3)
        # Remove edge responses caused by the box borders themselves.
        no_border_canny = cv2.bitwise_and(canny, boxes_mask_no_border)

        # Further restrict edges to the tracker update area.
        no_border_canny = cv2.bitwise_and(no_border_canny,
                                          no_border_canny,
                                          mask=self._info.update_area.mask())
        lines = cv2.HoughLinesP(
            image=no_border_canny,
            rho=1,
            theta=np.pi / 350,
            threshold=constants.CALIBRATOR_HLP_THRESHOLD,
            minLineLength=constants.CALIBRATOR_MIN_LINE_LENGTH,
            maxLineGap=constants.CALIBRATOR_MAX_LINE_GAP)

        # First vanishing point must already exist; used to filter segments.
        vp1 = self._info.vanishing_points[0]

        # Color version of the edge image, used as a debug overlay below.
        canny = cv2.cvtColor(no_border_canny, cv2.COLOR_GRAY2RGB)

        if lines is not None:
            for (x1, y1, x2, y2), in lines:
                point1 = x1, y1
                point2 = x2, y2

                try:
                    # Build a helper line from the segment endpoint farther
                    # from vp1 towards vp1.
                    if vp1.coordinates.distance(Coordinates(
                            x1, y1)) > vp1.coordinates.distance(
                                Coordinates(x2, y2)):
                        line_to_vp = Line(point1, vp1.point)
                    else:
                        line_to_vp = Line(point2, vp1.point)

                    # Reject segments nearly parallel to the direction towards
                    # vp1 (angle < 30 or > 150 degrees); drawn red in debug image.
                    if Line(point1, point2).angle(line_to_vp) < 30 or Line(
                            point1, point2).angle(line_to_vp) > 150:
                        cv2.line(canny, point1, point2, constants.COLOR_RED, 2)
                        continue

                    # Accepted segment: accumulate into parallel-coordinate
                    # space and draw it blue in the debug image.
                    self._pc_lines.add_to_pc_space(point1, point2)
                    cv2.line(canny, point1, point2, constants.COLOR_BLUE, 2)
                except SamePointError:
                    # Degenerate segment (both endpoints identical) - skip.
                    continue

            cv2.imwrite("test.jpg", canny)

        # Once enough segments are accumulated, extract the dominant crossing.
        if self._pc_lines.count > constants.CALIBRATOR_VP2_TRACK_MINIMUM:
            new_vanishing_point = self._pc_lines.find_most_lines_cross()

            x, y = new_vanishing_point
            if y is not None:
                self._info.vanishing_points.append(
                    VanishingPoint(point=new_vanishing_point))
            else:
                # No finite intersection: x is treated as an angle in degrees,
                # so store the vanishing point as a unit direction instead.
                dx = np.cos(np.deg2rad(x))
                dy = np.sin(np.deg2rad(x))
                direction = dx, dy
                self._info.vanishing_points.append(
                    VanishingPoint(direction=direction))

            self._pc_lines.clear()
Example #13
0
    def center(self):
        """
        :return: center Coordinates of tracked object
        """

        # Kalman filter post-state holds the estimated position in its first two rows.
        state = self._kalman.statePost
        return Coordinates(int(state[0][0]), int(state[1][0]))
Example #14
0
    def tracker_point(self):
        """
        :return: middle point on bottom edge of tracked object
        """

        bottom_y = int(self.center.y + self.size.height / 2)
        return Coordinates(x=self.center.x, y=bottom_y)
Example #15
0
    def principal_point(self) -> Coordinates:
        """
        :return: principal point coordinates of input video, assuming the principal point lies at the image center
        """

        half_width = self.width / 2
        half_height = self.height / 2

        return Coordinates(half_width, half_height)