예제 #1
0
    def img_callback(self, data):
        """Detect objects in an incoming ROS image, annotate it with
        bounding boxes and an FPS counter, and republish the result.

        Args:
            data: sensor_msgs/Image message in 'bgr8' encoding.
        """
        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            # If the conversion failed, cv_image is undefined; falling
            # through would raise a NameError on the next line.
            print(e)
            return

        # Detector expects RGB; ROS delivers BGR.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        detector_utils.draw_box_on_image(self.num_objects_detect,
                                         self.score_thresh, scores, boxes,
                                         classes, self.im_width,
                                         self.im_height, cv_image)

        # Calculate FPS (running average since node start)
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image (convert back to BGR to match the 'bgr8' encoding)
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.publisher.publish(self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
예제 #2
0
파일: localizer.py 프로젝트: uf-mil/mil
    def img_callback(self, data):
        """Run the detector on one frame, publish the target's bounding box
        (as a Point and a RegionOfInterest), and publish an annotated debug
        image.

        Args:
            data: image array used directly as a cv2/numpy image.
        """
        self.parse_label_map()
        # if self.check_timestamp():
        # return None

        # NOTE(review): unlike the other callbacks, no CvBridge conversion is
        # done here — `data` is treated as a numpy image already; confirm
        # against the subscriber that feeds this callback.
        cv_image = data
        # Detector expects RGB; source frames are BGR.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        self.im_height, self.im_width, channels = cv_image.shape
        print("Height ", self.im_height, " Width: ", self.im_width)
        #  cv_image = cv2.resize(cv_image, (256, 144))
        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image, self.target)

        # Calculate FPS (running average since node start)
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)
        print("bbox:", bbox)
        # Only publish when there is a detection; bbox[0] and bbox[1] are
        # treated as opposite corners of the box ((x, y) per corner).
        if len(bbox) > 0:
            pointx = (bbox[0][0] + bbox[1][0]) / 2
            pointy = (bbox[0][1] + bbox[1][1]) / 2
            pointxdist = abs(bbox[0][0] - bbox[1][0])
            pointydist = abs(bbox[0][1] - bbox[1][1])
            print(pointxdist)
            # Point.z carries the frame timestamp, not a spatial coordinate.
            msg = Point(x=pointx,
                        y=pointy,
                        z=self.see_sub.last_image_time.to_sec())
            print("X: ", pointx, "Y: ", pointy, "TIMESTAMP: ", msg.z)
            self.bbox_pub.publish(msg)
            roi = RegionOfInterest(x_offset=int(bbox[0][0]),
                                   y_offset=int(bbox[0][1]),
                                   height=int(pointydist),
                                   width=int(pointxdist))
            self.roi_pub.publish(roi)

        # Publish image (back to BGR to match the 'bgr8' wire encoding)
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
예제 #3
0
    def img_callback(self, data):
        """Detect dice in a ROS image, republish the annotated frame, and
        publish the midpoint + class of each confident detection.

        Args:
            data: sensor_msgs/Image message in 'bgr8' encoding.
        """
        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            # cv_image is undefined if the conversion failed; bail out
            # instead of raising a NameError below.
            print(e)
            return

        # Detector expects RGB; ROS delivers BGR.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image)

        # Calculate FPS (running average since node start)
        self.num_frames += 1
        elapsed_time = (
            datetime.datetime.now() - self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image (convert back to BGR to match the 'bgr8' encoding)
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.publisher.publish(self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)

        # Publish the center of every detection above the score threshold.
        # boxes are normalized [top, left, bottom, right]; scale to pixels.
        for i in range(self.num_objects_detect):
            if scores[i] > self.score_thresh:
                (left, right, top, bottom) = (boxes[i][1] * self.im_width,
                                              boxes[i][3] * self.im_width,
                                              boxes[i][0] * self.im_height,
                                              boxes[i][2] * self.im_height)
                # top left corner of bbox
                p1 = np.array([int(left), int(top)])
                # bottom right corner of bbox
                p2 = np.array([int(right), int(bottom)])
                mid_point = (p1 + p2) / 2
                # Point carries (x, y, detected class id).
                self.dice_publisher.publish(
                    Point(mid_point[0], mid_point[1], classes[i]))
예제 #4
0
            # Fragment: inside the per-frame capture loop of a hand-tracking
            # script; the enclosing while/try begins above this excerpt.
            # draw_box_on_image returns two per-frame flags here; presumably
            # one is "hand crossed the line" and the other "hand detected" —
            # TODO confirm against this project's detector_utils.
            a, b = detector_utils.draw_box_on_image(
                num_hands_detect, score_thresh, scores, boxes, classes, im_width, im_height, frame, Line_Position2,
                Orientation)
            lst1.append(a)
            lst2.append(b)
            # Provisional zeroes; the real counts are recomputed from
            # lst1/lst2 after the loop ends.
            no_of_time_hand_detected = no_of_time_hand_crossed = 0
            # Calculate Frames per second (FPS)
            num_frames += 1
            elapsed_time = (datetime.datetime.now() -
                            start_time).total_seconds()
            fps = num_frames / elapsed_time

            if args['display']:

                # Display FPS on frame
                detector_utils.draw_text_on_image("FPS : " + str("{0:.2f}".format(fps)), frame)
                cv2.imshow('Detection', cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
                # 'q' quits: close windows, stop the video stream, leave loop.
                if cv2.waitKey(25) & 0xFF == ord('q'):
                    cv2.destroyAllWindows()
                    vs.stop()
                    break

        # Aggregate the per-frame flags into event counts, then persist.
        no_of_time_hand_detected = count_no_of_times(lst2)
        # no_of_time_hand_detected=b
        no_of_time_hand_crossed = count_no_of_times(lst1)
        # print(no_of_time_hand_detected)
        # print(no_of_time_hand_crossed)
        save_data(no_of_time_hand_detected, no_of_time_hand_crossed)
        print("Average FPS: ", str("{0:.2f}".format(fps)))

    except KeyboardInterrupt:
예제 #5
0
    def img_callback(self, data):
        """Detect the target in a ROS image, publish an annotated debug
        image, verify the detection contains orange, then steer toward it
        and (once centered) compute the marker's curve.

        Args:
            data: sensor_msgs/Image message in 'bgr8' encoding.
        """
        if not self.enabled:
            # print(self.enabled)
            return None
        # Skip stale/duplicate frames.
        if self.check_timestamp(data):
            return None
        try:
            # print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            # cv_image is undefined if the conversion failed; bail out
            # instead of raising a NameError below.
            print(e)
            return None
        # Detector expects RGB; ROS delivers BGR.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box. bbox[0] is the top-left corner, bbox[1] the
        # bottom-right corner ((x, y) per corner).
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image)

        # Calculate FPS (running average since node start)
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image (back to BGR to match the 'bgr8' encoding)
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)

        # No detection this frame: indexing bbox below would raise. Sibling
        # callbacks guard the same way with len(bbox) > 0.
        if not bbox:
            return None

        # Find midpoint of the region of interest, stored as [y, x]
        bbox_midpoint = [((bbox[0][1] + bbox[1][1]) / 2),
                         ((bbox[0][0] + bbox[1][0]) / 2)]
        # print(cv_image[int(bbox[0][0]):int(bbox[1][0]),
        # int(bbox[0][1]):int(bbox[1][1])])
        '''
        Confirm region of interest has orange where the bbox[0] contains the
        topleft coord and bbox[1] contains bottom right
        '''
        # create NumPy arrays from the boundaries
        lower = np.array(self.lower, dtype="uint8")
        upper = np.array(self.upper, dtype="uint8")
        # Run through the mask function, returns all black image if no orange
        check = self.mask_image(
            cv_image[int(bbox[0][1]):int(bbox[1][1]),
                     int(bbox[0][0]):int(bbox[1][0])], lower, upper)
        '''
        Find if we are centered on the region of interest, if not display its
        position relative to the center of the camera. Perform the check to see
        if we are looking at an image with orange in it. If not we are done
        here.
        '''
        check = cv2.cvtColor(check, cv2.COLOR_BGR2GRAY)
        if cv2.countNonZero(check) == 0:
            print('Check Failed.')
            return None
        else:
            # Where [0] is X coord and [1] is Y coord.
            self.find_direction(bbox_midpoint[1], bbox_midpoint[0])
        '''
        Once we center on the region of interest, assuming we are still
        locked on, calculate the curve of the marker.
        '''
        if self.centered:
            self.find_curve(check)
0
    def img_callback(self, data):
        """Detect the marker in a ROS image, publish an annotated debug
        image, confirm the detection contains orange, then steer toward it
        and (once centered) compute the marker's curve.

        Args:
            data: sensor_msgs/Image message in 'bgr8' encoding.
        """
        # Skip stale/duplicate frames.
        if self.check_timestamp(data):
            return None

        try:
            print('Working')
            cv_image = self.bridge.imgmsg_to_cv2(data, 'bgr8')
        except CvBridgeError as e:
            # cv_image is undefined if the conversion failed; bail out
            # instead of raising a NameError below.
            print(e)
            return None

        # Detector expects RGB; ROS delivers BGR.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)

        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box. bbox[0] is the top-left corner, bbox[1] the
        # bottom-right corner ((x, y) per corner).
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image)

        # Calculate FPS (running average since node start)
        self.num_frames += 1
        elapsed_time = (
            datetime.datetime.now() - self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)

        # Publish image (back to BGR to match the 'bgr8' encoding)
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)

        # No detection this frame: indexing bbox below would raise. Sibling
        # callbacks guard the same way with len(bbox) > 0.
        if not bbox:
            return None

        # Find midpoint of the region of interest, stored as [y, x]
        bbox_midpoint = [((bbox[0][1] + bbox[1][1]) / 2),
                         ((bbox[0][0] + bbox[1][0]) / 2)]
        # print(cv_image[int(bbox[0][0]):int(bbox[1][0]),
        # int(bbox[0][1]):int(bbox[1][1])])
        '''
        Confirm region of interest has orange where the bbox[0] contains the
        topleft coord and bbox[1] contains bottom right
        '''
        # create NumPy arrays from the boundaries
        lower = np.array(self.lower, dtype="uint8")
        upper = np.array(self.upper, dtype="uint8")
        # Run through the mask function, returns all black image if no orange
        check = self.mask_image(cv_image[int(bbox[0][1]):int(bbox[1][1]),
                                         int(bbox[0][0]):int(bbox[1][0])],
                                lower, upper)
        '''
        Find if we are centered on the region of interest, if not display its
        position relative to the center of the camera. Perform the check to see
        if we are looking at an image with orange in it. If not we are done
        here.
        '''
        check = cv2.cvtColor(check, cv2.COLOR_BGR2GRAY)
        if cv2.countNonZero(check) == 0:
            print('Check Failed.')
            return None
        else:
            # Where [0] is X coord and [1] is Y coord.
            self.find_direction(bbox_midpoint[1], bbox_midpoint[0])
        '''
        Once we center on the region of interest, assuming we are still
        locked on, calculate the curve of the marker.
        '''
        if self.centered:
            self.find_curve(check)
예제 #7
0
def main():
    """Capture frames from the camera, run the hand detector on each,
    and display/annotate an FPS counter until interrupted."""
    # Detection confidence threshold to draw bounding box
    score_thresh = 0.60

    vs = cvs.VideoCapture(1)

    # max number of hands we want to detect/track
    num_hands_detect = 1

    # Used to calculate fps
    start_time = datetime.datetime.now()
    num_frames = 0
    # Pre-seed fps so the KeyboardInterrupt handler can always print it,
    # even if interrupted before the first frame is processed.
    fps = 0.0

    im_height, im_width = (None, None)

    try:
        while True:
            # NOTE(review): time.sleep(30) would stall 30 s per frame, so
            # this presumably resolves to a millisecond-style helper from
            # the cvs toolkit — confirm which `sleep` is imported.
            sleep(30)
            # Read Frame and process
            frame = cvs.read()
            if frame is None:
                continue
            frame = cv2.resize(frame, (640, 480))

            # Lazily record the frame size from the first frame.
            if im_height is None:
                im_height, im_width = frame.shape[:2]

            # Convert image to rgb since opencv loads images in bgr,
            # if not accuracy will decrease
            try:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            except cv2.error:
                # Narrowed from a bare `except:`, which would also swallow
                # KeyboardInterrupt and defeat the outer handler.
                print("Error converting to RGB")

            # Run image through tensorflow graph
            boxes, scores, classes = detector_utils.detect_objects(
                frame, detection_graph, sess)

            # Draw bounding boxeses and text
            detector_utils.draw_box_on_image(num_hands_detect, score_thresh,
                                             scores, boxes, classes, im_width,
                                             im_height, frame)

            # Calculate Frames per second (FPS)
            num_frames += 1
            elapsed_time = (datetime.datetime.now() -
                            start_time).total_seconds()
            fps = num_frames / elapsed_time

            # Display FPS on frame
            lbs = "FPS : " + str("{0:.2f}".format(fps))
            cvs.setLbs(lbs)

            if args['display']:

                detector_utils.draw_text_on_image(
                    "FPS : " + str("{0:.2f}".format(fps)), frame)

                cvs.imshow(cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))

        # Unreachable while the loop above has no break; kept for parity
        # with the interrupt handler below.
        print("Average FPS: ", str("{0:.2f}".format(fps)))

    except KeyboardInterrupt:
        print("Average FPS: ", str("{0:.2f}".format(fps)))