Example #1
    def detect(self):
        """ Performs object detection and publishes coordinates. """
         
        # Initialize detector
        self.send_message(Color.GREEN, "[INFO] Initializing TinyYOLOv3 detector.")
        det = Detector(
            "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/tiny3.cfg",
            "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/tiny3_68000.weights",
            "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/obj.names")
        
        (H, W) = (None, None)

        # Load model
        self.send_message(Color.GREEN, "[INFO] Loading network model.")
        net = det.load_model()
        print(net)

        # Initialize video stream
        self.send_message(Color.GREEN, "[INFO] Starting video stream.")
        
        counter = 0
        dets = 0
        nondets = 0
        detect = True
        fps = FPS().start()
        boxes, confidences, indices, cls_ids, colors, ids, distances = [], [], [], [], [], [], []
        zed_cam_size = 1280

        ret = True
        while not rospy.is_shutdown():
            # Grab next frame
    
            #ret, frame = video.read()
            frame = self.image
            #cap = cv2.VideoCapture("/home/nvidia/opencv_install/pajarito/bird.jpg")
            #hasFrame, frame = cap.read()
            color = ""
            diststring = ""

            ## Optional frame modifications (brightness/darkness) go here

            #frame = add_brightness(frame)
            #frame = add_darkness(frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                self.send_message(Color.RED, "[DONE] Quitting program.")
                break

            frame = imutils.resize(frame, width=1000)

            (H, W) = frame.shape[:2]
            if det.get_w() is None or det.get_h() is None:
                det.set_h(H)
                det.set_w(W)

            # Perform detection
            
            detect = True
            dets += 1
            # Get bounding boxes, confidences, indices and class IDs
            boxes, confidences, indices, cls_ids = det.get_detections(net, frame)
            # Publish detections
            
            

            # If there were any previous detections, draw them
            colors = []
            distances = []
            obj_list = ObjDetectedList()
            len_list = 0

            for ix in indices:
                i = ix[0]

                box = boxes[i]
                x, y, w, h = box
                x, y, w, h = int(x), int(y), int(w), int(h)

                if detect:
                    color = self.calculate_color(frame,x,y,h,w)
                    depth = self.img_depth[y:y+h,x:x+w]

                    # Average the finite depth values sampled from the ROI
                    d_list = []
                    for j in depth:
                        if np.isfinite(j[1]):
                            d_list.append(j[1])

                    if len(d_list) != 0:
                        dist = np.mean(d_list)
                    else:
                        dist = float('nan')

                    if dist < .30:
                        diststring = "OUT OF RANGE"
                    else:
                        diststring = str(dist) + " m"

                    color = str(color.color)
                    colors.append(color)
                    distances.append(dist)
                

                    
                    if not np.isnan(dist):
                        obj = ObjDetected()
                        obj.x = x
                        obj.y = y
                        obj.h = h
                        obj.w = w
                        obj.X = dist
                        obj.Y = 0
                        obj.color = color
                        obj.clase = 'bouy' if cls_ids[i] == 0 else 'marker'
                        len_list += 1
                        obj_list.objects.append(obj)

                            
                    det.draw_prediction(frame, cls_ids[i], confidences[i], color, diststring, x, y, x+w, y+h)

            det_str = "Det: {}, BBoxes {}, Colors {}, Distance {}".format(dets, boxes, colors, distances)
            self.send_message(Color.BLUE, det_str)
            fps.update()
            obj_list.len = len_list
            self.detector_pub.publish(obj_list)
            cv2.line(frame, (500, 560), (500, 0), (255, 0, 0))
            fps.stop()

            info = [
                ("Detects", dets),
                ("No detects", nondets),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, det.get_h() - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # Show current frame
            #cv2.imshow("Frame", frame)
            #print(self.depth)
        
            #cv2.waitKey(3)
            rate.sleep()        
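
Example #1 assumes a surrounding ROS node that fills self.image and self.img_depth from the ZED camera, creates self.detector_pub, and defines the loop rate. Below is a minimal sketch of what that setup could look like; the topic names, image encodings, placeholder frame sizes, and the 10 Hz rate are assumptions for illustration, not taken from the original node.

import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
from boat.msg import ObjDetected, ObjDetectedList  # assumed message module of the boat package


class DetectorNode:
    def __init__(self):
        self.bridge = CvBridge()
        # Placeholder frames so detect() has data before the first callback arrives,
        # sized to match the resized 1000x560 frame used in detect()
        self.image = np.zeros((560, 1000, 3), np.uint8)
        self.img_depth = np.zeros((560, 1000), np.float32)
        # Assumed ZED topic names; adjust to the actual camera launch configuration
        rospy.Subscriber("/zed/rgb/image_rect_color", Image, self.image_callback)
        rospy.Subscriber("/zed/depth/depth_registered", Image, self.depth_callback)
        self.detector_pub = rospy.Publisher("obj_detected_list", ObjDetectedList, queue_size=10)

    def image_callback(self, msg):
        self.image = self.bridge.imgmsg_to_cv2(msg, "bgr8")

    def depth_callback(self, msg):
        self.img_depth = self.bridge.imgmsg_to_cv2(msg, "32FC1")

    # detect() from Example #1 would be defined here on the same class


if __name__ == "__main__":
    rospy.init_node("detector_node")
    rate = rospy.Rate(10)    # detect() calls rate.sleep() on this module-level name
    node = DetectorNode()
    node.detect()            # runs the loop from Example #1 once that method is added
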
Example #2
def detect():
    """ Performs object detection and publishes coordinates. """

    #global image and depth frames from ZED camera
    global image
    global points_list

    points_list = [[0, 0, 0]] * 921600
    image = np.zeros((560, 1000, 3), np.uint8)
    # Initialize detector
    send_message(Color.GREEN, "[INFO] Initializing TinyYOLOv3 detector.")
    det = Detector(
        "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/tiny3.cfg",
        "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/tiny3_68000.weights",
        "/home/nvidia/catkin_ws/src/boat/scripts/vantec-config/obj.names")
    (H, W) = (None, None)

    # Load model
    send_message(Color.GREEN, "[INFO] Loading network model.")
    net = det.load_model()

    # Initialize video stream
    send_message(Color.GREEN, "[INFO] Starting video stream.")

    counter = 0
    dets = 0
    nondets = 0
    detect = True
    fps = FPS().start()
    boxes, confidences, indices, cls_ids, colors, ids, distances = [], [], [], [], [], [], []

    ret = True
    while not rospy.is_shutdown():
        # Grab next frame

        #ret, frame = video.read()
        frame = image

        color = ""
        diststring = ""

        ## Optional frame modifications (brightness/darkness) go here

        #frame = add_brightness(frame)
        #frame = add_darkness(frame)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            send_message(Color.RED, "[DONE] Quitting program.")
            break

        print(frame.shape)
        frame = imutils.resize(frame, width=1000)

        (H, W) = frame.shape[:2]
        if det.get_w() is None or det.get_h() is None:
            det.set_h(H)
            det.set_w(W)

        # Perform detection

        detect = True
        dets += 1
        # Get bounding boxes, confidences, indices and class IDs
        boxes, confidences, indices, cls_ids = det.get_detections(net, frame)
        # Publish detections

        det_str = "Det: {}, BBoxes {}, Colors {}, Distance {}".format(
            dets, boxes, colors, distances)
        send_message(Color.BLUE, det_str)

        # If there were any previous detections, draw them

        colors = []
        distances = []
        obj_list = ObjDetectedList()
        len_list = 0

        for ix in indices:
            i = ix[0]

            box = boxes[i]
            x, y, w, h = box
            x, y, w, h = int(x), int(y), int(w), int(h)

            if detect:
                color = calculate_color(frame, x, y, h, w)
                # Map the box centre from the 1000-px-wide resized frame back to the
                # 1280-px-wide point cloud grid (1280 / 1000 = 1.28)
                p1 = int((x + w / 2) * 1.28)
                p2 = int((y + h / 2) * 1.28)

                # Distance to the object, taken from the cloud point at that pixel
                dist = points_list[(p1 + p2 * 1280)][0]

                if (dist < .30):
                    diststring = "OUT OF RANGE"
                else:
                    diststring = str(dist) + " m"

                color = str(color.color)
                colors.append(color)
                distances.append(dist)

                obj = ObjDetected()
                print(p1, p2)
                obj.x = x
                obj.y = y
                obj.h = h
                obj.w = w
                obj.X = points_list[(p1 + p2 * 1280)][0]
                obj.Y = points_list[(p1 + p2 * 1280)][1]
                obj.color = color
                obj.clase = 'bouy' if cls_ids[i] == 0 else 'marker'
                len_list += 1
                obj_list.objects.append(obj)

                det.draw_prediction(frame, cls_ids[i], confidences[i], color,
                                    diststring, x, y, x + w, y + h)

        fps.update()
        obj_list.len = len_list
        detector_pub.publish(obj_list)
        cv2.line(frame, (500, 560), (500, 0), (255, 0, 0))

        fps.stop()

        info = [
            ("Detects", dets),
            ("No detects", nondets),
            ("FPS", "{:.2f}".format(fps.fps())),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, det.get_h() - ((i * 20) + 20)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

        # Show current frame

        #cv2.imshow("depth", depth_frame)
        cv2.imshow("Frame", frame)
        #while cv2.waitKey(0) and 0xFF == ord('q'):
        rate.sleep()
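
Example #2 reads its frames from module-level globals: image for the RGB frame and points_list for a flattened point cloud with 921600 entries, which together with the p1 + p2 * 1280 indexing suggests a row-major 1280x720 registered cloud. Below is a minimal sketch of callbacks that could populate those globals; the topic names are assumptions for illustration, not taken from the original node.

import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image, PointCloud2
from sensor_msgs import point_cloud2

bridge = CvBridge()
image = np.zeros((560, 1000, 3), np.uint8)
points_list = [[0, 0, 0]] * 921600   # one (x, y, z) entry per pixel of a 1280x720 cloud


def image_callback(msg):
    global image
    image = bridge.imgmsg_to_cv2(msg, "bgr8")


def cloud_callback(msg):
    global points_list
    # Keep NaN points so the row-major index p1 + p2 * 1280 stays aligned with pixels
    points_list = list(point_cloud2.read_points(msg, field_names=("x", "y", "z"),
                                                skip_nans=False))


if __name__ == "__main__":
    rospy.init_node("detector_node")
    # Assumed ZED topic names; adjust to the actual camera launch configuration
    rospy.Subscriber("/zed/rgb/image_rect_color", Image, image_callback)
    rospy.Subscriber("/zed/point_cloud/cloud_registered", PointCloud2, cloud_callback)
    rate = rospy.Rate(10)   # detect() calls rate.sleep() on this module-level name
    detect()                # runs the loop from Example #2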