Example #1
    def update(self):
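        # Resolve camera objects lazily on the first update.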
        if not hasattr(self, 'cameras'):
            self.cameras = [getattr(self._robot, c) for c in self._names]

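        # Run detection on each camera frame and flatten the per-camera lists.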
        self._markers = sum([detect_markers(c.frame) for c in self.cameras],
                            [])
        self.sensors = [Marker(m) for m in self._markers]
Example #2
    def capture(self):
        rospy.loginfo("[%s] Start capturing." % (self.node_name))

        for frame in self.camera.capture_continuous(self.stream,
                                                    format="bgr",
                                                    use_video_port=True):
            stamp = rospy.Time.now()

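            # Undistort the raw frame using the precomputed rectification maps.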
            img = cv2.remap(frame.array, self.map1, self.map2,
                            cv2.INTER_LINEAR)

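            # Convert to grayscale and binarize for more robust marker detection.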
            grey = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            _, grey = cv2.threshold(grey, 127, 255, cv2.THRESH_BINARY)

            markers = detect_markers(grey)

            tag = Tag()
            tag.header.stamp = stamp
            for m in markers:
                tag.id = m.id
                tag.point = self.image2ground(m.center)
                self.pub_tags.publish(tag)

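            # Reset the stream buffer before capturing the next frame.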
            self.stream.truncate(0)

            if rospy.is_shutdown():
                break

        self.camera.close()
        rospy.loginfo("[%s] Capture ended." % (self.node_name))
Example #3
def markers(args, must_print=True):
    global nao_motion
    data.IP = args['--ip']
    data.PORT = int(args['--port'])
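    # Use the NAO camera by default; fall back to the webcam when --no-robot is set.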
    next_img_func = get_nao_image
    try:
        if args['--no-robot']:
            next_img_func = get_webcam_image
    except KeyError:
        pass
    end_reached = False
    nao_motion = MotionController()
    nao_motion.stand()
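    # Grab images and detect markers until Esc is pressed or the loop ends.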
    while not end_reached:
        img = next_img_func(int(args['--cam-no']), res=2)

        detected_markers = detect_markers(img)

        for m in detected_markers:
            m.draw_contour(img)
            cv2.putText(img, str(m.id), tuple(int(p) for p in m.center),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
            if must_print:
                print "ID: ", str(m.id), "\n", "Contours: ", str(m.contours.ravel().reshape(-1, 2).tolist())
                print
            else:
                end_reached = True
        print()
        if not args['--no-image']:
            cv2.imshow('Markers', img)
        if cv2.waitKey(1) == 27:
            print "Esc pressed : exit"
            close_camera()
            return 0
Example #4
    def __init__(self, robot, name, cameras, freq, multiprocess=True):
        SensorsController.__init__(self, None, [], freq)

        self.name = name

        self._robot = robot
        self._names = cameras

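        # Choose the detection entry point: background process or direct call.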
        self.detect = (lambda img: self._bg_detection(img)
                       if multiprocess else detect_markers(img))
Example #5
    def getUpperHoleCoordinatesUsingMarkers(self, index, camera_position, camera_matrix, camera_dist,
                                            tries=1, debug=False, res=640):
        """
        :param res: The resolution length
        :param index: the index of the hole
        :type index: int
        :param camera_position: the 6D position of the camera used for the detection (the bottom one),
                                    from the robot torso
        :type camera_position: tuple
        :param camera_matrix: the camera distortion matrix
        :type camera_matrix: np.array
        :param camera_dist: the camera distortion coefficients vector
        :type camera_dist: np.array
        :param debug: if True, draw the detected markers
        :type debug: bool
        :param tries: the number of times the detection will be run. If one try fails,
                      the whole detection is considered as failed
        :type tries: int
        Get an upper hole's coordinates using the Hamming markers on the Connect 4.
        This method assumes that the Hamming codes are visible on the image it will
            acquire using its "next_img_func". Otherwise, the detection fails
        """
        max_nb_of_markers = 0
        rvec = None
        tvec = None
        if res == 640:
            i_res = 2
        else:
            i_res = 1
        for i in range(tries):
            if self.cam_no == -1:
                img = self.next_img_func(1, res=i_res)  # We get the image from the bottom camera
            else:
                img = self.next_img_func(self.cam_no, res=i_res)
            min_nb_of_codes = 2
            markers = detect_markers(img)
            if markers is not None and len(markers) >= min_nb_of_codes:
                if debug:
                    for m in markers:
                        m.draw_contour(img)
                        cv2.putText(img, str(m.id), tuple(int(p) for p in m.center),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)

                self.upper_hole_detector._hamcodes = markers
                self.upper_hole_detector.runDetection([], markers)
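                # Keep the pose estimated from the try that saw the most markers.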
                if len(markers) > max_nb_of_markers:
                    max_nb_of_markers = len(markers)
                    rvec, tvec = self.upper_hole_detector.match3DModel(camera_matrix, camera_dist)
            else:
                if debug:
                    cv2.imshow("Debug", img)
                if cv2.waitKey(100) == 27:
                    raise NotEnoughLandmarksException("The detection was interrupted")
                raise NotEnoughLandmarksException("The model needs at least " + str(min_nb_of_codes) + " detected codes")
        return self._getUpperHoleCoordinates(rvec, tvec, index, camera_position)
Example #6
    def on_image(self, msg):
        img = self.bridge.imgmsg_to_cv2(msg, "bgr8")
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        _, img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)

        markers = detect_markers(img)

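        # Publish one Tag message per detected marker.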
        tag = Tag()
        tag.header.stamp = msg.header.stamp
        for m in markers:
            tag.id = m.id
            tag.point = self.image2ground(m.center)
            self.pub_tags.publish(tag)
Example #7
    def detectMarker(self, current_pitch, current_yaw):
        """
        :param current_pitch: current pitch of NAO's head, stored with the data
        :param current_yaw: current yaw of NAO's head, stored with the data
        Detect the marker of the selected hole in the picture.
        """
        marker_found = False, None
        tries = 0
        while not marker_found[0] and tries < 10:
            img = self.nao_video.getImageFromCamera()
            markers = detect_markers(img)
            tries += 1
            for marker in markers:
                # The marker id encodes the hole index: (hole + 1) * 1000.
                if marker.id == (self.selected_hole + 1) * 1000:
                    marker_found = True, marker
        if marker_found[0]:
            self.record(marker_found[1], current_pitch, current_yaw)
Example #8
    with closing(PoppyErgoJr()) as jr:
        jr.rest_posture.start()
        jr.rest_posture.wait_to_stop()

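        # Record motor positions at 25 Hz while the robot dances.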
        traj_rec = PositionWatcher(jr, 25., jr.motors)

        jr.dance.start()
        traj_rec.start()

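        # Benchmark: sample CPU load while running marker detection for D seconds.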
        t0 = time.time()
        while time.time() - t0 < D:
            cpu.append(psutil.cpu_percent())

            img = jr.camera.frame
            markers = detect_markers(img)
            for m in markers:
                m.draw_contour(img)

            time.sleep(.1)

        jr.dance.stop()
        traj_rec.stop()

        print('CPU M={}% (STD={})'.format(mean(cpu) * 4, std(cpu) * 4))

        fig = pylab.figure()
        ax = pylab.axes()
        traj_rec.plot(ax)
        pylab.show()
        fig.savefig('bench.png')
Example #9
    def capture_data(self):

        # Get Frame
        okay, self.img = self.cap.read()
        if self.img is None:
            # Bad Image, do nothing
            return

        # convert image to grayscale then back so it still has
        # 3 color dimensions
        gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        gray_img = cv2.cvtColor(gray_img, cv2.COLOR_GRAY2BGR)

        # threshold the image to either pure white or pure black;
        # thresh is scaled so that the image is thresholded at a percentage of 255
        self.thresh_img[:, :, :] = 255.0*np.round(gray_img[:, :, :]/(510.0*self.thresh))

        # blur the rounded image
        #blurred_image = cv2.GaussianBlur(self.thresh_img,(5,5),0)

        # find any valid AR tags in the image
        markers = detect_markers(self.thresh_img)

        # Assume no objects are available until proven otherwise
        ducky_unavailable = True
        duckybot_unavailable = True
        obstacle_unavailable = True

        # for each valid tag, get the pose
        for m in markers:
            # Draw the marker outline on the image
            m.draw_contour(self.img)

            # Label the tag ID on the image
            if cv2.__version__[0] == "2":
                # OpenCV 2.x font constant
                cv2.putText(self.img, str(m.id), tuple(int(p) for p in m.center), cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
            else:
                # OpenCV 3.x font constant
                cv2.putText(self.img, str(m.id), tuple(int(p) for p in m.center), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)

            # Get ducky pose if ducky AR tag was detected
            if (m.id == self.ducky_tag[0]):
                self.Ducky_Pose = self.get_object_pose(m, self.ducky_tag)
                ducky_unavailable = False

            # Get duckybot pose if duckybot AR tag was detected
            elif (m.id == self.duckybot_tag[0]):
                self.Duckybot_Pose = self.get_object_pose(m, self.duckybot_tag)
                duckybot_unavailable = False

            # Get obstacle pose if obstacle AR tag was detected
            elif (m.id == self.obstacle_tag[0]):
                self.Obstacle_Pose = self.get_object_pose(m, self.obstacle_tag)
                obstacle_unavailable = False
            elif (self.obstacle_tag[0] == -100):
                # Obstacle tag is in "match any" mode: treat this unknown tag as the obstacle
                self.Obstacle_Pose = self.get_object_pose(m, self.obstacle_tag)
                obstacle_unavailable = False

        # set poses for objects not found
        if ducky_unavailable:
            self.Ducky_Pose = [None, None]
        if duckybot_unavailable:
            self.Duckybot_Pose = [None, None]
        if obstacle_unavailable:
            self.Obstacle_Pose = [None, None]

        # Finished capturing available data, return
        return
Example #10
    def _detect(self, q, img):
        # Worker entry point: run detection and push the result onto the queue.
        q.put(detect_markers(img))
Example #11
camera.exposure_mode = 'sports'
rawCapture = PiRGBArray(camera, size=(1280, 720))

# allow the camera to warm up
time.sleep(2.5)

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture,
                                       format="bgr",
                                       use_video_port=True):
    # grab the raw NumPy array representing the image, then initialize the timestamp
    # and occupied/unoccupied text
    image = frame.array

    # Detect Markers
    markers = detect_markers(image)

    os.system('clear')

    # Draw contour and log positions
    for m in markers:
        print('Found marker {} at {}'.format(m.id, m.center))
        m.draw_contour(image)

    # show the frame
    cv2.imshow("Markers", image)
    key = cv2.waitKey(1) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)
Example #12
    def capture_data(self):

        # Get Frame
        self.c.startCapture()

        try:
            image = self.c.retrieveBuffer()
        except PyCapture2.Fc2error as fc2Err:
            print("Error retrieving buffer:", fc2Err)
            return  # bail out on a failed grab; image would be undefined below

        self.cap = image.convert(PyCapture2.PIXEL_FORMAT.BGR)
        # newimg.save("fc2TestImage.png", PyCapture2.IMAGE_FILE_FORMAT.PNG)

        self.c.stopCapture()

        self.img = np.array(self.cap.getData(), dtype="uint8").reshape(
            (self.cap.getRows(), self.cap.getCols(), 3))

        if self.img is None:
            # Bad Image, do nothing
            return

        self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)

        markers = detect_markers(self.img)

        # print(markers)
        # Assume no objects are available until proven otherwise
        A_unavailable = True
        B_unavailable = True
        C_unavailable = True

        # for each valid tag, get the pose
        for m in markers:
            # Draw the marker outline on the image
            m.draw_contour(self.img)

            # Label the tag ID on the image
            if cv2.__version__[0] == "2":
                # OpenCV 2.x font constant
                cv2.putText(self.img, str(m.id),
                            tuple(int(p) for p in m.center),
                            cv2.cv.CV_FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)
            else:
                # OpenCV 3.x font constant
                cv2.putText(self.img, str(m.id),
                            tuple(int(p) for p in m.center),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 3)

            # Get A pose if AR tag was detected
            if (m.id == self.A_tag[0]):
                self.A_Pose = self.get_object_pose(m, self.A_tag)
                A_unavailable = False

            # Get B pose if AR tag was detected
            elif (m.id == self.B_tag[0]):
                self.B_Pose = self.get_object_pose(m, self.B_tag)
                B_unavailable = False

            # Get C pose if AR tag was detected
            elif (m.id == self.C_tag[0]):
                self.C_Pose = self.get_object_pose(m, self.C_tag)
                C_unavailable = False
            elif (self.C_tag[0] == -100):
                # C tag is in "match any" mode: treat this unknown tag as C
                self.C_Pose = self.get_object_pose(m, self.C_tag)
                C_unavailable = False

        # set poses for objects not found
        if A_unavailable:
            self.A_Pose = [None, None]
        if B_unavailable:
            self.B_Pose = [None, None]
        if C_unavailable:
            self.C_Pose = [None, None]

        # Finished capturing available data, return
        return
Example #13
    def read_tag_ar(self, image):
        markers = detect_markers(image)
        if markers is None or len(markers) == 0:
            return None
        # Wrap the first detected marker in a zxing BarCode object.
        output = zxing.BarCode("")
        output.id = markers[0].id
        return output