Example #1
    def localize_detection(self, detection):
        """
            2d image detection -> 3d point in [map frame]
        """
        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            return False, None

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

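        # Back-project the detection centre through the rectified pinhole
        # model to a ray of unit depth:
        #   x = ((u - cx) - Tx) / fx, y = ((v - cy) - Ty) / fy, z = 1
        # (Tx and Ty are zero for a monocular camera).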
        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)

        if not localization:
            return False, None

        point_trans = None

        print "Face localized ", localization.pose.position.x, localization.pose.position.y
        if localization.pose.position.x == 0.0:
            print "Ignoring this one!"
            return False, None
        try:
            #(pos_trans, rot) = self.tf_listener.lookupTransform('/map', detection.header.frame_id, rospy.Time(0))
            point_trans = self.tf_listener.transformPoint(
                '/map',
                PointStamped(detection.header, localization.pose.position))
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            pass

        if point_trans is None:  # transformation failed
            return False, None

        print "Face detected at %d,%d" % (point_trans.point.x,
                                          point_trans.point.y)
        return True, point_trans.point
def main(args):
    global image
    rospy.init_node('skeleton_viewer', anonymous=True)

    image_info_sub = rospy.Subscriber("/camera/rgb/camera_info", CameraInfo,
                                      image_info_cb)

    if test_mode:
        cap = cv2.VideoCapture(0)

        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    else:
        ic = image_converter()

        cam_model = PinholeCameraModel()
        while (not camera_info) and (not rospy.is_shutdown()):
            time.sleep(1)
            print("waiting for camera info")

        cam_model.fromCameraInfo(camera_info)
        size = cam_model.fullResolution()
        print(cam_model.cx(), cam_model.cy(), cam_model.fullResolution())

    fps = 30
    rate = rospy.Rate(fps)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter()
    success = writer.open(file, fourcc, fps, size, True)  # `file` is expected to be an output path defined elsewhere in the original script

    while not rospy.is_shutdown():
        if test_mode:
            ret, image = cap.read()

        writer.write(image)

        cv2.imshow("Image window", image)

        if cv2.waitKey(1) & 0xFF == 27:
            break

        rate.sleep()

    if test_mode:
        cap.release()

    writer.release()
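
A self-contained sketch of the back-projection this example performs, assuming
only a sensor_msgs/CameraInfo message is at hand (the helper name is ours, not
part of the original):

from image_geometry import PinholeCameraModel

def pixel_to_unit_ray(camera_info, u, v):
    # Back-project pixel (u, v) through the rectified pinhole model to a
    # ray at depth z = 1; Tx/Ty are zero for a monocular camera.
    model = PinholeCameraModel()
    model.fromCameraInfo(camera_info)
    x = ((u - model.cx()) - model.Tx()) / model.fx()
    y = ((v - model.cy()) - model.Ty()) / model.fy()
    return (x, y, 1.0)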
Example #3
    def is_visible(self, pose, point):
        """
        check if point is visible by projecting it onto 2d camera plane
        todo: distance check to prevent very far away points from passing

        Args:
            pose (Pose): current pose of vehicle
            point (Point): point to check

        Returns:
            bool: if point is visible
        """

        transformed = self.transform_point(pose.header, point)

        # pinhole camera model reverses x, y coordinates
        x = -transformed.point.y  # point.y = horizontal camera dimension
        y = -transformed.point.z  # point.z = vertical camera dimension
        z = transformed.point.x  # point.x = z/depth camera dimension
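        # (i.e. convert from the ROS body convention -- x forward, y left,
        # z up -- to the camera optical convention -- x right, y down,
        # z forward; cf. REP 103)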

        # todo: does this need to be more elegant?
        if z > self.max_visible_distance:
            return False

        if not self.pinhole_camera_visible_check:
            return True

        # create camera info
        camera = PinholeCameraModel()
        camera.fromCameraInfo(self.camera_info)

        # project point onto image
        u, v = camera.project3dToPixel((x, y, z))

        # setup bounding box
        cx = camera.cx()
        cy = camera.cy()
        width = self.camera_info.width
        height = self.camera_info.height
        top = cy + height / 2
        bottom = cy - height / 2
        right = cx + width / 2
        left = cx - width / 2

        # light is visible if projected within the bounding box of the 2d image
        return left <= u <= right and bottom <= v <= top
class coord_converter:

  def __init__(self):
    self.image_sub = rospy.Subscriber("hough_circles/circles", CircleArrayStamped, self.callback)
    self.camera_info_msg = None
    self.camera_model = PinholeCameraModel() 
    self.image_info_sub = rospy.Subscriber("camera1/camera_info", CameraInfo, self.image_info_cb, queue_size=1) 

#    if self.camera_info_msg is not None:
#        print('PinholeCameraModel parameters:\n')
#        print('cx: ' + str(self.camera_model.cx()))
#        print('cy: ' + str(self.camera_model.cy()))
#        print('fx: ' + str(self.camera_model.fx()))
#        print('fy: ' + str(self.camera_model.fy()))

  def callback(self, data):
    print('\nIMAGE COORDINATES (center):')
    for i in range(len(data.circles)):
      print(' circle' + str(i+1) + ' detected:')
      print('  x: ' + str(data.circles[i].center.x))
      print('  y: ' + str(data.circles[i].center.y))
      
    print('\nWORLD COORDINATES (center):')
    print(' camera_robot coordinates:\n  x: 1.4\n  y: 2.2')
    for i in range(len(data.circles)):
      print(' circle' + str(i+1) + ' detected:')
      # using the equations shown at the beginning of this script:
      x_world = (data.circles[i].center.x - self.camera_model.cx()) * 1.25 / self.camera_model.fx()  # z_world = camera rod height = 1.25 m
      y_world = (data.circles[i].center.y - self.camera_model.cy()) * 1.25 / self.camera_model.fy()
      print('  x: ' + str(1.4 + x_world))  # x=0 in the image corresponds to x=1.4 in the world
      print('  y: ' + str(2.6 - y_world))  # y=0 in the image corresponds to y=2.6 in the world; the world y axis points opposite the image y axis

  def image_info_cb(self, msg):
    if self.camera_info_msg is None:
      self.camera_model.fromCameraInfo(msg)
      self.camera_info_msg = msg
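
The "equations shown at the beginning of this script" are not part of this
excerpt. A plausible reconstruction, with hypothetical names, inverts the
standard pinhole projection u = fx * X/Z + cx at the known plane depth:

def pixel_to_plane(u, v, model, z_plane=1.25):
    # Invert the pinhole projection for a point known to lie on a plane at
    # depth z_plane in front of the camera (here, the camera rod height).
    x = (u - model.cx()) * z_plane / model.fx()
    y = (v - model.cy()) * z_plane / model.fy()
    return x, y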
Example #5
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        #global tf
        global numFaces

        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            print("Best time is higher than 1")
            return

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)
        transformedPoint = point
        if not localization:
            return

        now = detection.header.stamp
        self.tf.waitForTransform("camera_rgb_optical_frame", "map", now,
                                 rospy.Duration(5.0))

        m = geometry_msgs.msg.PoseStamped()
        m.header.frame_id = "camera_rgb_optical_frame"
        m.pose = localization.pose
        m.header.stamp = detection.header.stamp
        transformedPoint = self.tf.transformPose("map", m)

        if (localization.pose.position.x != 0.0):

            #print("Localisation: ", localization)
            print("Transformation: ", transformedPoint)
            #print("Point: ", point)

            if counter == 0:
                #print("Counter is zero.")
                counter += 1
                lastAdded = transformedPoint
                #print("Adding the point to the array.")
                #alreadyDetected.append(transformedPoint)
                #print(len(alreadyDetected))
            else:
                #print("Counter: ", counter)
                dist = self.distance(transformedPoint, lastAdded)

                #print("Number of detected faces so far: ", len(detected))
                print("Distance is ", dist)

                if dist <= maxDistance:
                    #print("-----------Less then max----------------------------------")
                    #alreadyDetected.append(transformedPoint)
                    #print(len(alreadyDetected))
                    lastAdded = transformedPoint
                    counter += 1
                    beenDetected = False
                    for p in detected:
                        #print("Checking already detected")
                        #print("Distance: ", self.distance(p, transformedPoint))

                        if self.distance(p, transformedPoint) <= maxDistance:
                            #print("This face has already been detected")
                            #print("Distance: ", self.distance(p, transformedPoint))
                            beenDetected = True
                            break

                    if not beenDetected:
                        print("-----------------Good to go------------------------")
                        detected.append(lastAdded)

                        marker = Marker()
                        #print("Localisation: ", localization)
                        marker.header.stamp = detection.header.stamp
                        marker.header.frame_id = detection.header.frame_id
                        marker.pose = localization.pose
                        marker.type = Marker.CUBE
                        marker.action = Marker.ADD
                        marker.frame_locked = False
                        marker.lifetime = rospy.Duration.from_sec(1)
                        marker.id = self.marker_id_counter
                        marker.scale = Vector3(0.1, 0.1, 0.1)
                        marker.color = ColorRGBA(1, 0, 0, 1)
                        self.markers.markers.append(marker)
                        self.marker_id_counter += 1
                        self.soundhandle.say("I detected a face.", blocking=False)
                        print("Number of detected faces: ", len(detected))
                        if len(detected) == numFaces:
                            #publish stop

                            #rospy.init_node('mapper', anonymous=True)
                            #msg = String()
                            #msg.data = "Found all faces."
                            print("Sending shutdown")
                            self.pub.publish("Found all faces.")

                else:
                    #print("-----------------------------------------More then max----")
                    lastAdded = transformedPoint

        else:
            print("Localisation: ", localization)
Example #6
class MarkerOccGrid(OccGridUtils):
    '''
    Handles updating occupancy grid when new data comes in.
    TODO: Upon call can return some path to go to in order to find them.
    '''
    def __init__(self, image_sub, grid_res, grid_width, grid_height,
                 grid_starting_pose):
        super(self.__class__, self).__init__(res=grid_res,
                                             width=grid_width,
                                             height=grid_height,
                                             starting_pose=grid_starting_pose)

        self.tf_listener = tf.TransformListener()

        self.cam = PinholeCameraModel()
        self.camera_info = image_sub.wait_for_camera_info()
        if self.camera_info is None:
            # Maybe raise an alarm here.
            rospy.logerr("I don't know what to do without my camera info.")
            return

        self.cam.fromCameraInfo(self.camera_info)

    def update_grid(self, timestamp):
        '''
        Takes marker information to update occupancy grid.
        '''
        x_y_position, height = self.get_tf(timestamp)

        self.add_circle(x_y_position, self.calculate_visual_radius(height))
        self.publish_grid()

    def add_marker(self, marker, timestamp):
        '''
        Find the actual 3d pose of the marker and fill in the occupancy grid for that pose.
        This works by:
            1. Calculate unit vector between marker point and the image center in the image frame.
            2. Use height measurement to find real life distance (m) between center point and marker center.
            3. Use unit vec and magnitude to find dx and dy in meters.
            4. Pass info to OccGridUtils.
        '''
        if marker[0] is None:
            return

        x_y_position, height = self.get_tf(timestamp)

        if marker[2] < self.calculate_marker_area(height) * .6:
            # If the detected region isn't big enough dont add it.
            rospy.logwarn(
                "Marker found but it is too small, not adding marker.")
            return

        # Calculate position of marker accounting for camera rotation.
        dir_vector = unit_vector(
            np.array([self.cam.cx(), self.cam.cy()] - marker[0]))
        trans, rot = self.tf_listener.lookupTransform("/map", "/downward",
                                                      timestamp)
        cam_rotation = tf.transformations.euler_from_quaternion(
            rot)[2] + np.pi / 2
        dir_vector = np.dot(dir_vector, make_2D_rotation(cam_rotation))
        marker_rotation = cam_rotation + marker[1]

        trans, rot = self.tf_listener.lookupTransform("/map", "/base_link",
                                                      timestamp)
        if (np.abs(np.array(rot)[:2]) > .005).any():
            rospy.logwarn("We're at a weird angle, not adding marker.")
            return
        magnitude = self.calculate_visual_radius(height,
                                                 second_point=marker[0])
        local_position = dir_vector[::-1] * magnitude
        position = local_position + x_y_position

        #print local_position

        # Pose on ground plane from center, in meters with the initial point
        # at (0, 0). (The image coordinate pose and real life pose are
        # different.)
        pose = Pose2D(x=position[0], y=position[1], theta=marker_rotation)
        self.found_marker(pose)

        return pose

    def get_tf(self, timestamp=None):
        '''
        x_y position and height in meters
        '''
        if timestamp is None:
            timestamp = rospy.Time()

        self.tf_listener.waitForTransform("/map", "/downward", timestamp,
                                          rospy.Duration(1.0))
        trans, rot = self.tf_listener.lookupTransform("/map", "/downward",
                                                      timestamp)
        x_y_position = trans[:2]
        self.tf_listener.waitForTransform("/ground", "/downward", timestamp,
                                          rospy.Duration(1.0))
        trans, _ = self.tf_listener.lookupTransform("/ground", "/downward",
                                                    timestamp)
        height = np.nan_to_num(trans[2])

        return x_y_position, height

    def correct_height(self, measured_height, timestamp):
        '''
        Adjust the measured height from the seafloor using our orientation relative to it.
        We assume the floor is flat (should be valid for transdec but maybe not for pool).
        All the math is just solving triangles.

        Note: If the roll or pitch is too far off, the range could be to a point that is not
            planar to the floor directly under us - in which case this will fail.

        TODO: See if this actually can produce better results.
        '''
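        # With measured range h, roll r and pitch p: the lateral offsets are
        # |h*sin(r)| and |h*sin(p)|, so the corrected vertical height is
        # sqrt(h**2 - (h*sin(r))**2 - (h*sin(p))**2), as computed below.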
        trans, rot = self.tf_listener.lookupTransform("/map", "/base_link",
                                                      timestamp)
        euler_rotation = tf.transformations.euler_from_quaternion(rot)

        roll_offset = np.abs(np.sin(euler_rotation[0]) * measured_height)
        pitch_offset = np.abs(np.sin(euler_rotation[1]) * measured_height)

        height = np.sqrt(measured_height**2 -
                         (roll_offset**2 + pitch_offset**2))
        return height

    def calculate_marker_area(self, height):
        '''
        Estimate what the area of the marker should be so we don't add incorrect markers to the occupancy grid.
        What we really don't want is to add markers to the grid that are on the edge since the direction and center of
            the marker are off.
        '''
        MARKER_LENGTH = 1.22  # m
        MARKER_WIDTH = .1524  # m

        # Get m/px on the ground floor.
        m = self.calculate_visual_radius(height)
        if self.cam.cy() < self.cam.cx():
            px = self.cam.cy()
        else:
            px = self.cam.cx()

        m_px = m / px
        marker_area_m = MARKER_WIDTH * MARKER_LENGTH
        marker_area_px = marker_area_m / (m_px**2)

        return marker_area_px

    def calculate_visual_radius(self, height, second_point=None):
        '''
        Draws rays to find the radius of the FOV of the camera in meters.
        It also can work to find the distance between two planar points some distance from the camera.
        '''
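        # If theta is the angle between the optical axis and the ray through
        # second_point, the distance on a plane `height` below the camera is
        # height * tan(theta).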

        mid_ray = np.array([0, 0, 1])

        if second_point is None:
            if self.cam.cy() < self.cam.cx():
                second_point = np.array([self.cam.cx(), 0])
            else:
                second_point = np.array([0, self.cam.cy()])

        edge_ray = unit_vector(self.cam.projectPixelTo3dRay(second_point))

        # Calculate angle between vectors and use that to find r
        theta = np.arccos(np.dot(mid_ray, edge_ray))
        return np.tan(theta) * height
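
A rough numeric check of that FOV-radius formula (all numbers are made up for
illustration):

import numpy as np

theta = np.deg2rad(30.0)  # assumed angle between optical axis and edge ray
height = 2.0              # assumed camera height above the plane, in meters
print(np.tan(theta) * height)  # visible half-footprint, roughly 1.15 m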
Example #8
class Scanner:
    def __init__(self):
        self.pixels_per_scanline = rospy.get_param('~pixels_per_scanline')
        self.scanner_info_file_name = rospy.get_param(
            '~scanner_info_file_name')
        self.threshold = rospy.get_param('~threshold')

        self.mutex = threading.RLock()
        self.image_update_flag = threading.Event()
        self.bridge = CvBridge()
        rospy.Subscriber("image_stream", sensor_msgs.msg.Image,
                         self.update_image)
        rospy.loginfo("Waiting for camera info...")
        self.camera_info = rospy.wait_for_message('camera_info',
                                                  sensor_msgs.msg.CameraInfo)
        rospy.loginfo("Camera info received.")

        rospy.loginfo("Waiting for projector info service...")
        rospy.wait_for_service('get_projector_info')
        rospy.loginfo("Projector info service found.")
        get_projector_info = rospy.ServiceProxy('get_projector_info',
                                                projector.srv.GetProjectorInfo)
        self.projector_info = get_projector_info().projector_info

        self.projector_model = PinholeCameraModel()
        self.projector_model.fromCameraInfo(self.projector_info)

        self.read_scanner_info()

        self.projector_to_camera_rotation_matrix = cv.CreateMat(
            3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      self.projector_to_camera_rotation_matrix)

        predistortmap_x = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        predistortmap_y = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        InitPredistortMap(self.projector_model.intrinsicMatrix(),
                          self.projector_model.distortionCoeffs(),
                          predistortmap_x, predistortmap_y)

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_x)
        cv.AddS(predistortmap_x, -minVal, predistortmap_x)
        uncropped_projection_width = int(math.ceil(maxVal - minVal))
        self.center_pixel = self.projector_model.cx() - minVal

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_y)
        cv.AddS(predistortmap_y, -minVal, predistortmap_y)
        uncropped_projection_height = int(math.ceil(maxVal - minVal))

        self.number_of_scanlines = int(
            math.ceil(
                float(uncropped_projection_width) / self.pixels_per_scanline))

        rospy.loginfo("Generating projection patterns...")

        graycodes = []
        for i in range(self.number_of_scanlines):
            graycodes.append(graycodemath.generate_gray_code(i))

        self.number_of_projection_patterns = int(
            math.ceil(math.log(self.number_of_scanlines, 2)))
        self.predistorted_positive_projections = []
        self.predistorted_negative_projections = []

        for i in range(self.number_of_projection_patterns):
            cross_section = cv.CreateMat(1, uncropped_projection_width,
                                         cv.CV_8UC1)

            #Fill in cross section with the associated bit of each gray code
            for pixel in range(uncropped_projection_width):
                scanline = pixel // self.pixels_per_scanline
                scanline_value = graycodemath.get_bit(graycodes[scanline],
                                                      i) * 255
                cross_section[0, pixel] = scanline_value

            #Repeat the cross section over the entire image
            positive_projection = cv.CreateMat(uncropped_projection_height,
                                               uncropped_projection_width,
                                               cv.CV_8UC1)
            cv.Repeat(cross_section, positive_projection)

            #Predistort the projections to compensate for projector optics so that the scanlines are approximately planar
            predistorted_positive_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Remap(positive_projection,
                     predistorted_positive_projection,
                     predistortmap_x,
                     predistortmap_y,
                     flags=cv.CV_INTER_NN)

            #Create a negative of the pattern for thresholding
            predistorted_negative_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Not(predistorted_positive_projection,
                   predistorted_negative_projection)

            #Fade the borders of the patterns to avoid artifacts at the edges of projection
            predistorted_positive_projection_faded = fade_edges(
                predistorted_positive_projection, 20)
            predistorted_negative_projection_faded = fade_edges(
                predistorted_negative_projection, 20)

            self.predistorted_positive_projections.append(
                predistorted_positive_projection_faded)
            self.predistorted_negative_projections.append(
                predistorted_negative_projection_faded)

        rospy.loginfo("Waiting for projection setting service...")
        rospy.wait_for_service('set_projection')
        rospy.loginfo("Projection setting service found.")
        self.set_projection = rospy.ServiceProxy('set_projection',
                                                 projector.srv.SetProjection)

        self.pixel_associations_msg = None
        self.point_cloud_msg = None

        rospy.Service("~get_point_cloud", graycode_scanner.srv.GetPointCloud,
                      self.handle_point_cloud_srv)

        point_cloud_pub = rospy.Publisher('~point_cloud',
                                          sensor_msgs.msg.PointCloud)

        rospy.loginfo("Ready.")

        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.point_cloud_msg is not None:
                point_cloud_pub.publish(self.point_cloud_msg)
            rate.sleep()

    def read_scanner_info(self):
        try:
            input_stream = open(self.scanner_info_file_name, 'r')
        except IOError:
            rospy.loginfo('Specified scanner info file at ' +
                          self.scanner_info_file_name + ' does not exist.')
            return
        info_dict = yaml.safe_load(input_stream)
        try:
            self.projector_to_camera_translation_vector = tuple(
                info_dict['projector_to_camera_translation_vector'])
            self.projector_to_camera_rotation_vector = list_to_matrix(
                info_dict['projector_to_camera_rotation_vector'], 3, 1,
                cv.CV_32FC1)
        except (TypeError, KeyError):
            rospy.loginfo('Scanner info file at ' +
                          self.scanner_info_file_name +
                          ' is in an unrecognized format.')
            return
        rospy.loginfo('Scanner info successfully read from ' +
                      self.scanner_info_file_name)

    def update_image(self, imagemsg):
        with self.mutex:
            if not self.image_update_flag.is_set():
                self.latest_image = self.bridge.imgmsg_to_cv(imagemsg, "mono8")
                self.image_update_flag.set()

    def get_next_image(self):
        with self.mutex:
            self.image_update_flag.clear()
        self.image_update_flag.wait()
        with self.mutex:
            return self.latest_image

    def get_picture_of_projection(self, projection):
        image_message = self.bridge.cv_to_imgmsg(projection, encoding="mono8")
        self.set_projection(image_message)
        return self.get_next_image()

    def get_projector_line_associations(self):
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        for i in range(self.number_of_projection_patterns):
            positives.append(
                self.get_picture_of_projection(
                    self.predistorted_positive_projections[i]))
            negatives.append(
                self.get_picture_of_projection(
                    self.predistorted_negative_projections[i]))

        rospy.loginfo("Thresholding...")
        strike_sum = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        gray_codes = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height,
                                               self.camera_info.width,
                                               cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height,
                                       self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask,
                    cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height,
                                   self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)

            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height,
                                    self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2**i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        projector_line_associations = cv.CreateMat(self.camera_info.height,
                                                   self.camera_info.width,
                                                   cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
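        # Gray-to-binary conversion: XOR the code with itself shifted right by
        # 16, 8, 4, 2 and 1 bits (the loop bound below evaluates to 4 for
        # CV_32SC1, so i runs 4..0 and the shift is 2**(2**i)).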
        for i in range(
                cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)),
                -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(
                self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations,
                            projector_line_associations_bitshifted_right,
                            (2**-(2**i)), -0.5)
            cv.Xor(projector_line_associations,
                   projector_line_associations_bitshifted_right,
                   projector_line_associations)

        rospy.loginfo("Post processing...")

        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)

        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height,
                                             self.camera_info.width,
                                             cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines,
                    invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)

        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations

    def handle_point_cloud_srv(self, req):
        response = graycode_scanner.srv.GetPointCloudResponse()
        self.get_point_cloud()
        response.point_cloud = self.point_cloud_msg
        response.success = True
        return response

    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points,
                                             cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates,
                                                camera_model)

        intersection_points = ray_plane_intersections(camera_rays, planes)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
                print scanline_associations[0, i]
                print rectified_camera_coordinates[0, i]

            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg

    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=1.0 / self.projector_model.fx())

        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None,
                 normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors,
                     self.projector_to_camera_rotation_matrix)

        normal_vectors_split = [None] * 3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0],
                 normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i],
                        self.projector_to_camera_translation_vector[i],
                        n_dot_p, n_dot_p)

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1],
                 normal_vectors_split[2], n_dot_p, planes)

        return planes

    def display_scanline_associations(self, associations):
        display_image = cv.CreateMat(self.camera_info.height,
                                     self.camera_info.width, cv.CV_8UC1)
        cv.ConvertScale(associations, display_image,
                        255.0 / self.number_of_scanlines)
        cv.NamedWindow("associations", flags=0)
        cv.ShowImage("associations", display_image)
        cv.WaitKey(800)
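
ray_plane_intersections() is not shown in this excerpt. Given the plane
encoding built above -- per-pixel channels (nx, ny, nz, n.p) -- a single-ray
NumPy version (our reconstruction, not the original helper) could be:

import numpy as np

def ray_plane_intersection(ray, plane):
    # ray: direction vector from the camera origin; plane: (nx, ny, nz, n.p).
    # Points on the ray are t * ray; substituting into n . x = n . p gives
    # t = (n . p) / (n . ray).
    normal, n_dot_p = np.asarray(plane[:3]), plane[3]
    t = n_dot_p / np.dot(normal, np.asarray(ray))
    return t * np.asarray(ray)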
Example #9
class PixelConversion():
    def __init__(self):
        self.image_topic = "wall_camera/image_raw"
        self.image_info_topic = "wall_camera/camera_info"
        self.point_topic = "/clicked_point"
        self.frame_id = "/map"

        self.pixel_x = 350
        self.pixel_y = 215

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.image_topic
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.image_cb, queue_size = 1)
        self.image_info_sub = rospy.Subscriber(self.image_info_topic, CameraInfo, self.image_info_cb, queue_size = 1)
        self.point_sub = rospy.Subscriber(self.point_topic, PointStamped, self.point_cb, queue_size = 4)

        self.line_pub = rospy.Publisher("line", Marker, queue_size = 10)
        self.inter_pub = rospy.Publisher("inter", Marker, queue_size = 10)
        self.plane_pub = rospy.Publisher("plane", Marker, queue_size = 10)

        self.points = []
        self.ready_for_image = False
        self.has_pic = False
        self.camera_info_msg = None

        self.tf_listener = tf.TransformListener()

        # self.display_picture()
        self.camera_model = PinholeCameraModel()
        

    def point_cb(self, point_msg):
        if len(self.points) < 4:
            self.points.append(point_msg.point)

        if len(self.points) == 1:
            self.ready_for_image = True 
            print "Point stored from click: ", point_msg.point


    def image_cb(self, img_msg):
        if self.ready_for_image:
            # Use cv_bridge() to convert the ROS image to OpenCV format
            try:
                frame = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
            except CvBridgeError as e:
                print e
            
            # Convert the image to a Numpy array since most cv2 functions
            # require Numpy arrays.
            frame = np.array(frame, dtype=np.uint8)
            print "Got array"
            
            # Process the frame using the process_image() function
            #display_image = self.process_image(frame)
                           
            # Display the image.
            self.img = frame 
            # self.has_pic = True
            

            if self.camera_info_msg is not None:
                print "cx ", self.camera_model.cx()
                print "cy ", self.camera_model.cy()
                print "fx ", self.camera_model.fx()
                print "fy ", self.camera_model.fy()

                # Get the point from the clicked message and transform it into the camera frame                
                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                point_in_camera_frame = tuple(np.dot(tf_mat, np.array([self.points[0].x, self.points[0].y, self.points[0].z, 1.0])))[:3]
                # Get the pixels related to the clicked point (the point must be in the camera frame)
                pixel_coords = self.camera_model.project3dToPixel(point_in_camera_frame)
                print "Pixel coords ", pixel_coords

                # Get the unit vector related to the pixel coords 
                unit_vector = self.camera_model.projectPixelTo3dRay((int(pixel_coords[0]), int(pixel_coords[1])))
                print "Unit vector ", unit_vector

                # TF the unit vector of the pixel to the frame of the clicked point from the map frame
                # self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                # (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                # self.tfros = tf.TransformerROS()
                # tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                # xyz = tuple(np.dot(tf_mat, np.array([unit_vector[0], unit_vector[1], unit_vector[2], 1.0])))[:3]

                # print "World xyz ", xyz 

                # intersect the unit vector with the ground plane 

                red = (0,125,255)
                cv2.circle(self.img, (int(pixel_coords[0]), int(pixel_coords[1])), 30, red)
                cv2.imshow(self.image_topic, self.img)


                ''' trying better calculations for going from pixel to world '''

                # need plane of the map in camera frame points
                # (0, 0, 0), (1, 1, 0), (-1, -1, 0) = map plane in map frame coordinates
                p1 = [-94.0, -98.0, 0.0]
                p2 = [-92.0, -88.0, 0.0]
                p3 = [-84.0, -88.0, 0.0]
                # p2 = [1.0, -1.0, 0.0]
                # p3 = [-1.0, -1.0, 0.0]
                # point_marker = self.create_points_marker([p1, p2, p3], "/map")
                # self.plane_pub.publish(point_marker)

                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id,  rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)

                # pts in camera frame coords
                p1 = list(np.dot(tf_mat, np.array([p1[0], p1[1], p1[2], 1.0])))[:3]
                p2 = list(np.dot(tf_mat, np.array([p2[0], p2[1], p2[2], 1.0])))[:3]
                p3 = list(np.dot(tf_mat, np.array([p3[0], p3[1], p3[2], 1.0])))[:3]
                plane_norm = self.get_plane_normal(p1, p2, p3) ## maybe viz
                print "P1 ", p1
                print "P2 ", p2
                print "P3 ", p3
                print "Plane norm: ", plane_norm
                point_marker = self.create_points_marker([p1, p2, p3], img_msg.header.frame_id)
                self.plane_pub.publish(point_marker)

                
                # need unit vector to define ray to cast onto plane
                line_pt = list(unit_vector)
                line_norm = self.get_line_normal([0.0,0.0,0.0], np.asarray(line_pt) * 10) # just scale the unit vector for another point, maybe just use both unit vector for the point and the line

                inter = self.line_plane_intersection(line_pt, line_norm, p1, plane_norm)

                ## intersection is in the camera frame...
                ### tf this into map frame
                self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                xyz = tuple(np.dot(tf_mat,np.array([inter[0], inter[1], inter[2], 1.0])))[:3]
                print "intersection pt: ", xyz
                point_marker = self.create_point_marker(xyz, "/map")
                self.inter_pub.publish(point_marker)

                line_marker = self.create_line_marker([0.0,0.0,0.0], np.asarray(unit_vector) * 30, img_msg.header.frame_id) 
                self.line_pub.publish(line_marker)



            self.ready_for_image = False
            self.points = []
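
get_plane_normal() and line_plane_intersection() are called above but not
defined in this excerpt; minimal reconstructions consistent with those call
sites (the bodies are ours) might read:

import numpy as np

def get_plane_normal(p1, p2, p3):
    # Unit normal of the plane through three non-collinear points.
    n = np.cross(np.subtract(p2, p1), np.subtract(p3, p1))
    return n / np.linalg.norm(n)

def line_plane_intersection(line_pt, line_dir, plane_pt, plane_norm):
    # Intersect the line line_pt + t * line_dir with the plane through
    # plane_pt whose normal is plane_norm.
    t = np.dot(np.subtract(plane_pt, line_pt), plane_norm) / np.dot(line_dir, plane_norm)
    return np.asarray(line_pt) + t * np.asarray(line_dir)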
Example #10
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces
        global brojach
        global avgx
        global avgy
        global avgr
        global flag
        global maxHeight
        global minHeight
        global maxDistRing
        if flag == 0:
            if brojach < 10:
                avgx = avgx + detection.pose.position.x
                avgy = avgy + detection.pose.position.y
                avgr = avgr + detection.pose.position.z
                brojach = brojach + 1
                return
            if brojach == 10:
                tp = detection
                avgx = avgx / 10
                avgy = avgy / 10
                avgr = avgr / 10
                camera_info = None
                best_time = 100
                for ci in self.camera_infos:
                    time = abs(ci.header.stamp.to_sec() -
                               detection.header.stamp.to_sec())
                    if time < best_time:
                        camera_info = ci
                        best_time = time

                if not camera_info or best_time > 1:
                    print("Best time is higher than 1")
                    tp.pose.position.z = -1
                    self.pub_avg_ring.publish(tp)
                    flag = 1
                    return

                camera_model = PinholeCameraModel()
                camera_model.fromCameraInfo(camera_info)

                p = Point(
                    ((avgx - camera_model.cx()) - camera_model.Tx()) /
                    camera_model.fx(),
                    ((avgy + avgr - camera_model.cy()) - camera_model.Ty()) /
                    camera_model.fy(), 0)
                #p2 = Point(((avgx - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),((avgy -avgr - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(), 0)
                avgx = avgy = avgr = 0
                lp = self.localize(detection.header, p, self.region_scope)
                #lp2 = self.localize(detection.header, p2, self.region_scope)
                self.tf.waitForTransform(detection.header.frame_id, "map",
                                         rospy.Time.now(), rospy.Duration(5.0))
                mp = geometry_msgs.msg.PoseStamped()
                mp.header.frame_id = detection.header.frame_id
                mp.pose = lp.pose
                mp.header.stamp = detection.header.stamp
                tp = self.tf.transformPose("map", mp)

                #mp2 = geometry_msgs.msg.PoseStamped()
                #mp2.header.frame_id = detection.header.frame_id
                #mp2.pose = lp2.pose
                #mp2.header.stamp = detection.header.stamp
                #tp2 = self.tf.transformPose("map", mp2)

                now2 = rospy.Time.now()
                self.tf2.waitForTransform("base_link", "map", now2,
                                          rospy.Duration(5.0))

                robot = geometry_msgs.msg.PoseStamped()
                robot.header.frame_id = "base_link"
                robot.pose.position.x = 0
                robot.pose.position.y = 0
                robot.header.stamp = now2
                robotmap = self.tf2.transformPose("map", robot)

                if lp.pose.position.z != 0:
                    dp = self.distance(robotmap, tp)
                    if dp > maxDistRing:
                        print("Distance too big: ", dp)
                        tp.pose.position.z = -1
                    if tp.pose.position.z > maxHeight + 0.2 or tp.pose.position.z < minHeight:
                        print("Height is wrong: ", tp.pose.position.z)
                        tp.pose.position.z = -1
                    #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                    #	tp.pose.position.z = -1
                    #if (point1.z > 0.5 or point1.z < 0.3):
                    #	tp.pose.position.z = -1
                    #if (point2.z > 0.5 or point2.z < 0.3): # visina
                    #	tp.pose.position.z = -1
                else:
                    print("Localizer failed")
                    tp.pose.position.z = -1
                self.pub_avg_ring.publish(tp)
                flag = 1
                return

        if flag == 1:
            u = detection.pose.position.x
            #u = avgx+avgr
            v = detection.pose.position.y + detection.pose.position.z
            #v = avgy

            w = detection.pose.position.x
            #w = avgx-avgr
            q = detection.pose.position.y - detection.pose.position.z

            g = detection.pose.position.x
            #w = avgx-avgr
            h = detection.pose.position.y + detection.pose.position.z + 3

            r = detection.pose.position.x
            #w = avgx-avgr
            t = detection.pose.position.y + detection.pose.position.z - 3
            #q = avgy

            camera_info = None
            best_time = 100
            for ci in self.camera_infos:
                time = abs(ci.header.stamp.to_sec() -
                           detection.header.stamp.to_sec())
                if time < best_time:
                    camera_info = ci
                    best_time = time

            if not camera_info or best_time > 1:
                print("Best time is higher than 1")
                return

            camera_model = PinholeCameraModel()
            camera_model.fromCameraInfo(camera_info)

            point1 = Point(((u - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((v - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point2 = Point(((w - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((q - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point3 = Point(((g - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((h - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point4 = Point(((r - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((t - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            localization1 = self.localize(detection.header, point1,
                                          self.region_scope)
            localization2 = self.localize(detection.header, point2,
                                          self.region_scope)
            localization3 = self.localize(detection.header, point3,
                                          self.region_scope)
            localization4 = self.localize(detection.header, point4,
                                          self.region_scope)
            if not (localization1 or localization2 or localization3
                    or localization4):
                print("Localization failed")
                return
            now = detection.header.stamp
            self.tf.waitForTransform(detection.header.frame_id, "map", now,
                                     rospy.Duration(5.0))

            m = geometry_msgs.msg.PoseStamped()
            m.header.frame_id = detection.header.frame_id
            m.pose = localization1.pose
            m.header.stamp = detection.header.stamp
            transformedPoint1 = self.tf.transformPose("map", m)

            m2 = geometry_msgs.msg.PoseStamped()
            m2.header.frame_id = detection.header.frame_id
            m2.pose = localization2.pose
            m2.header.stamp = detection.header.stamp
            transformedPoint2 = self.tf.transformPose("map", m2)

            m3 = geometry_msgs.msg.PoseStamped()
            m3.header.frame_id = detection.header.frame_id
            m3.pose = localization3.pose
            m3.header.stamp = detection.header.stamp
            transformedPoint3 = self.tf.transformPose("map", m3)

            m4 = geometry_msgs.msg.PoseStamped()
            m4.header.frame_id = detection.header.frame_id
            m4.pose = localization4.pose
            m4.header.stamp = detection.header.stamp
            transformedPoint4 = self.tf.transformPose("map", m4)

            now2 = rospy.Time.now()
            self.tf2.waitForTransform("base_link", "map", now2,
                                      rospy.Duration(5.0))

            robot = geometry_msgs.msg.PoseStamped()
            robot.header.frame_id = "base_link"
            robot.pose.position.x = 0
            robot.pose.position.y = 0
            robot.header.stamp = now2
            robotmap = self.tf2.transformPose("map", robot)

            if localization1.pose.position.z != 0:
                dist1 = self.distance(robotmap, transformedPoint1)
            else:
                dist1 = 100000
            if localization2.pose.position.z != 0:
                dist2 = self.distance(robotmap, transformedPoint2)
            else:
                dist2 = 100000
            if localization3.pose.position.z != 0:
                dist3 = self.distance(robotmap, transformedPoint3)
            else:
                dist3 = 100000
            if localization4.pose.position.z != 0:
                dist4 = self.distance(robotmap, transformedPoint4)
            else:
                dist4 = 100000
            # find smallest distance to a point.
            dist = dist1
            transformedPoint = transformedPoint1

            if dist2 < dist:
                dist = dist2
                transformedPoint = transformedPoint2
            if dist3 < dist:
                dist = dist3
                transformedPoint = transformedPoint3
            if dist4 < dist:
                dist = dist4
                transformedPoint = transformedPoint4

            print("distance: ", dist)
            if (dist < maxDistRing):
                radius = abs((transformedPoint1.pose.position.y -
                              transformedPoint2.pose.position.y) / 2)
                print("radius: ", radius)
                print("height: ", transformedPoint.pose.position.z)
                if (dist1 < maxDistRing
                        and dist2 < maxDistRing) or (dist1 > maxDistRing
                                                     and dist2 > maxDistRing):
                    print("Checking radius")
                    if (radius > 0.7 or radius < 0.3):
                        print("Wrong radius")
                        return
                else:
                    print("Cant use radius")
                if (transformedPoint.pose.position.z > maxHeight
                        or transformedPoint.pose.position.z < minHeight):
                    print("Wrong height")
                    return
                print("start checking")
                print("Detected rings: ", len(detected))
                beenDetected = False
                for p in detected:
                    if self.distance(p, transformedPoint) <= maxDistance:
                        print("Already detected ring!")
                        beenDetected = True
                        break

                if (beenDetected == False):
                    detected.append(transformedPoint)
                    print("Publishing new ring")
                    self.pub_ring.publish(transformedPoint)
                    marker = Marker()
                    marker.header.stamp = detection.header.stamp
                    marker.header.frame_id = transformedPoint.header.frame_id
                    marker.pose.position.x = transformedPoint.pose.position.x
                    marker.pose.position.y = transformedPoint.pose.position.y
                    marker.pose.position.z = transformedPoint.pose.position.z
                    marker.type = Marker.CUBE
                    marker.action = Marker.ADD
                    marker.frame_locked = False
                    marker.lifetime = rospy.Duration.from_sec(1)
                    marker.id = self.marker_id_counter
                    marker.scale = Vector3(0.1, 0.1, 0.1)
                    marker.color = ColorRGBA(1, 0, 0, 1)
                    self.markers.markers.append(marker)
                    self.marker_id_counter += 1
                    print("Number of detected rings: ", len(detected))
                    #if len(detected) == numFaces:
                    #	print("Sending shutdown")
            else:
                print("Too far away")
Example #11
0
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces

        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            print("Best time is higher than 1")
            return

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)
        if not localization:
            return

        now = detection.header.stamp
        self.tf.waitForTransform("camera_rgb_optical_frame", "map", now,
                                 rospy.Duration(5.0))

        m = geometry_msgs.msg.PoseStamped()
        m.header.frame_id = "camera_rgb_optical_frame"
        m.pose = localization.pose
        m.header.stamp = detection.header.stamp
        transformedPoint = self.tf.transformPose("map", m)

        if (localization.pose.position.x != 0.0):

            #print("Transformation: ", transformedPoint)
            beenDetected = False
            # both branches of the original if/else assigned lastAdded the
            # same value, so the check collapses to a single loop
            lastAdded = transformedPoint
            for p in detected:
                if self.distance(p, transformedPoint) <= maxDistance:
                    beenDetected = True
                    break

            if (beenDetected == False):
                counter += 1
                print("-----------------Good to go------------------------")
                detected.append(lastAdded)
                self.pub_face.publish(transformedPoint)
                marker = Marker()
                marker.header.stamp = detection.header.stamp
                marker.header.frame_id = detection.header.frame_id
                marker.pose = localization.pose
                marker.type = Marker.CUBE
                marker.action = Marker.ADD
                marker.frame_locked = False
                marker.lifetime = rospy.Duration.from_sec(1)
                marker.id = self.marker_id_counter
                marker.scale = Vector3(0.1, 0.1, 0.1)
                marker.color = ColorRGBA(1, 0, 0, 1)
                self.markers.markers.append(marker)
                self.marker_id_counter += 1
                #self.soundhandle.say("I detected a face.", blocking = False)
                print("Number of detected faces: ", len(detected))
                lastAdded = transformedPoint
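
The duplicate check above is a plain distance-threshold test against previously accepted detections. A minimal sketch of the same idea, assuming PoseStamped inputs in a shared frame; is_new_detection and planar distance are illustrative choices, not the original implementation:

import math

def is_new_detection(candidate, detected, max_distance):
    # Reject the candidate if any accepted pose lies within max_distance of it.
    for p in detected:
        dx = p.pose.position.x - candidate.pose.position.x
        dy = p.pose.position.y - candidate.pose.position.y
        if math.sqrt(dx * dx + dy * dy) <= max_distance:
            return False
    return True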
Example #12
0
class MarkerFinder():
    def __init__(self):
        self.tf_listener = tf.TransformListener()

        self.search = False
        self.last_image = None
        self.last_image_timestamp = None
        self.last_draw_image = None

        # This may need to be changed if you want to use a different image feed.
        self.image_sub = sub8_ros_tools.Image_Subscriber('/down/left/image_raw', self.image_cb)
        self.image_pub = sub8_ros_tools.Image_Publisher("vision/channel_marker/target_info")

        self.toggle = rospy.Service('vision/channel_marker/search', SetBool, self.toggle_search)

        self.cam = PinholeCameraModel()
        self.cam.fromCameraInfo(self.image_sub.wait_for_camera_info())
        self.rviz = rviz.RvizVisualizer()

        # self.occ_grid = MarkerOccGrid(self.image_sub, grid_res=.05, grid_width=500, grid_height=500,
        #                               grid_starting_pose=Pose2D(x=250, y=250, theta=0))
        if rospy.get_param("/orange_markers/use_boost"):
            path = os.path.join(rospack.get_path('sub8_perception'),
                'ml_classifiers/marker/' + rospy.get_param("/orange_markers/marker_classifier"))

            self.boost = cv2.Boost()
            rospy.loginfo("MARKER - Loading classifier from {}".format(path))
            self.boost.load(path)
            rospy.loginfo("MARKER - Classifier for marker loaded.")
        else:
            self.lower = np.array(rospy.get_param('/color/channel_marker/hsv_low'))
            self.upper = np.array(rospy.get_param('/color/channel_marker/hsv_high'))

        self.pose_service = rospy.Service("vision/channel_marker/pose", VisionRequest, self.request_marker)

        self.kernel = np.ones((15, 15), np.uint8)

        # Occasional status publisher
        self.timer = rospy.Timer(rospy.Duration(1), self.publish_target_info)

        print "MARKER - Got no patience for sittin' around!"


    def toggle_search(self, srv):
        if srv.data:
            rospy.loginfo("MARKER - Looking for markers now.")
            self.search = True
        else:
            rospy.loginfo("MARKER - Done looking for markers.")
            self.search = False

        return SetBoolResponse(success=srv.data)

    def publish_target_info(self, *args):
        if not self.search or self.last_image is None:
            return

        markers = self.find_marker(np.copy(self.last_image))
        #self.occ_grid.update_grid(self.last_image_timestamp)
        #self.occ_grid.add_marker(markers, self.last_image_timestamp)

        if self.last_draw_image is not None: #and (markers is not None):
            self.image_pub.publish(np.uint8(self.last_draw_image))

    def image_cb(self, image):
        '''Hang on to last image and when it was taken.'''
        self.last_image = image
        self.last_image_timestamp = self.image_sub.last_image_time

    def calculate_threshold(self, img, aggression=.5):
        histr = cv2.calcHist([img], [0], None, [179], [0, 179])
        threshold = np.uint8((179 - np.argmax(histr)) * aggression)
        return threshold

    def get_2d_pose(self, mask):
        # estimate covariance matrix and get corresponding eigenvectors
        wh = np.where(mask)[::-1]
        cov = np.cov(wh)
        eig_vals, eig_vects = np.linalg.eig(cov)

        # use index of max eigenvalue to find max eigenvector
        i = np.argmax(eig_vals)
        max_eigv = eig_vects[:, i] * np.sqrt(eig_vals[i])

        # flip indices to find min eigenvector
        min_eigv = eig_vects[:, 1 - i] * np.sqrt(eig_vals[1 - i])

        # define center of pipe
        center = np.average(wh, axis=1)

        # define vertical vector (sub's current direction)
        vert_vect = np.array([0.0, -1.0])

        if max_eigv[1] > 0:
            max_eigv = -max_eigv
            min_eigv = -min_eigv

        num = np.cross(max_eigv, vert_vect)
        denom = np.linalg.norm(max_eigv) * np.linalg.norm(vert_vect)
        angle_rad = np.arcsin(num / denom)

        return center, angle_rad, [max_eigv, min_eigv]

    def find_marker(self, img):
        #img[:, -100:] = 0
        img = cv2.GaussianBlur(img, (7, 7), 15)
        last_image_timestamp = self.last_image_timestamp

        if rospy.get_param("/orange_markers/use_boost"):
            some_observations = machine_learning.boost.observe(img)
            prediction = [int(x) for x in [self.boost.predict(obs) for obs in some_observations]]
            mask = np.reshape(prediction, img[:, :, 2].shape).astype(np.uint8) * 255
        else:
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, self.lower, self.upper)


        kernel = np.ones((5,5),np.uint8)
        mask = cv2.dilate(mask, kernel, iterations = 1)
        mask = cv2.erode(mask, kernel, iterations = 2)

        #mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, self.kernel)
        contours, _ = cv2.findContours(np.copy(mask), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) < 1:
            rospy.logwarn("MARKER - No marker found.")
            return None

        # Find biggest area contour
        self.last_draw_image = np.dstack([mask] * 3)
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]

        # Draw a miniAreaRect around the contour and find the area of that.
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        mask = np.zeros(shape=mask.shape)
        cv2.drawContours(self.last_draw_image, [box], 0, (0, 128, 255), 2)
        cv2.drawContours(mask, [box], 0, 255, -1)
        rect_area = cv2.contourArea(box)

        center, angle_rad, [max_eigv, min_eigv] = self.get_2d_pose(mask)

        cv2.line(self.last_draw_image, tuple(np.int0(center)), tuple(np.int0(center + (2 * max_eigv))), (0, 255, 30), 2)
        cv2.line(self.last_draw_image, tuple(np.int0(center)), tuple(np.int0(center + (2 * min_eigv))), (0, 30, 255), 2)

        # Check if the box is too big or small.
        xy_position, height = self.get_tf(timestamp=last_image_timestamp)
        expected_area = self.calculate_marker_area(height)

        print "{} < {} < {} ({})".format(expected_area * .2, rect_area, expected_area * 2, expected_area)
        if expected_area * .5 < rect_area < expected_area * 2:
            #cv2.drawContours(self.last_draw_image, [box], 0, (255, 255, 255), -1)
            self.rviz.draw_ray_3d(center, self.cam, np.array([1, .5, 0, 1]), frame='/downward',
                _id=5, length=height, timestamp=last_image_timestamp)
        else:
            angle_rad = 0
            max_eigv = np.array([0, -20])
            min_eigv = np.array([-20, 0])
            #cv2.drawContours(self.last_draw_image, [box], 0, (255, 0, 30), -1)
            rospy.logwarn("MARKER - Size out of bounds!")

        self.rviz.draw_ray_3d(center, self.cam, np.array([1, .5, 0, 1]), frame='/downward',
            _id=5, length=height, timestamp=last_image_timestamp)

        # Convert to a 3d pose to move the sub to.
        abs_position = self.transform_px_to_m(center, last_image_timestamp)
        q_rot = tf.transformations.quaternion_from_euler(0, 0, angle_rad)

        return numpy_quat_pair_to_pose(abs_position, q_rot)

    def request_marker(self, data):
        if self.last_image is None:
            return False  # Fail if we have no images cached

        timestamp = self.last_image_timestamp
        goal_pose = self.find_marker(self.last_image)
        found = (goal_pose is not None)

        if not found:
            resp = VisionRequestResponse(
                found=found
            )
        else:
            resp = VisionRequestResponse(
                pose=PoseStamped(
                    header=Header(
                        stamp=timestamp,
                        frame_id='/down_camera'),
                    pose=goal_pose
                ),
                found=found
            )
        return resp

    def get_tf(self, timestamp=None, get_rotation=False):
        '''
        x_y position, height in meters and quat rotation of the sub if requested
        '''
        if timestamp is None:
            timestamp = rospy.Time()

        self.tf_listener.waitForTransform("/map", "/downward", timestamp, rospy.Duration(5.0))
        trans, rot = self.tf_listener.lookupTransform("/map", "/downward", timestamp)
        x_y_position = trans[:2]
        self.tf_listener.waitForTransform("/ground", "/downward", timestamp, rospy.Duration(5.0))
        trans, _ = self.tf_listener.lookupTransform("/ground", "/downward", timestamp)

        height = np.nan_to_num(trans[2])
        x_y_position = np.nan_to_num(x_y_position)

        if get_rotation:
            return x_y_position, rot, height

        return x_y_position, height

    def transform_px_to_m(self, m_position, timestamp):
        '''
        Finds the absolute position of the marker in meters.
        '''
        xy_position, q, height = self.get_tf(timestamp, get_rotation=True)

        dir_vector = unit_vector(np.array([self.cam.cx(), self.cam.cy()]) - m_position)
        cam_rotation = tf.transformations.euler_from_quaternion(q)[2] + np.pi / 2
        print "MARKER - dir_vector:", dir_vector
        print "MARKER - cam_rotation:", cam_rotation
        dir_vector = np.dot(dir_vector, make_2D_rotation(cam_rotation))

        # Calculate distance from middle of frame to marker in meters.
        magnitude = self.calculate_visual_radius(height, second_point=m_position)
        abs_position = np.append(xy_position + dir_vector[::-1] * magnitude, -SEARCH_DEPTH)

        return abs_position

    def calculate_visual_radius(self, height, second_point=None):
        '''
        Draws rays to find the radius of the FOV of the camera in meters.
        It also can work to find the distance between two planar points some distance from the camera.
        '''

        mid_ray = np.array([0, 0, 1])

        if second_point is None:
            if self.cam.cy() < self.cam.cx():
                second_point = np.array([self.cam.cx(), 0])
            else:
                second_point = np.array([0, self.cam.cy()])

        edge_ray = unit_vector(self.cam.projectPixelTo3dRay(second_point))

        # Calculate angle between vectors and use that to find r
        theta = np.arccos(np.dot(mid_ray, edge_ray))
        return np.tan(theta) * height

    def calculate_marker_area(self, height):
        '''
        What we really don't want is to find markers that are on the edge since the direction and center of
            the marker are off.
        '''
        MARKER_LENGTH = 1.22  # m
        MARKER_WIDTH = .1524  # m

        # Get m/px on the ground floor.
        m = self.calculate_visual_radius(height)
        if self.cam.cy() < self.cam.cx():
            px = self.cam.cy()
        else:
            px = self.cam.cx()

        m_px = m / px
        marker_area_m = MARKER_WIDTH * MARKER_LENGTH
        marker_area_px = marker_area_m / (m_px ** 2)

        return marker_area_px
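
calculate_visual_radius above recovers ground coverage from the camera model: it takes the angle theta between the optical axis and the ray through an edge pixel, then returns tan(theta) * height. A hedged worked example with assumed numbers (a camera 2 m above the ground and an edge ray 0.5 rad off-axis):

import numpy as np

height = 2.0   # metres above the ground plane (assumed)
theta = 0.5    # angle between mid ray and edge ray, radians (assumed)
print(np.tan(theta) * height)   # ~1.09 m of ground covered by that half-image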
class GridToImageConverter():
    def __init__(self):
        self.image_topic = "wall_camera/image_raw"
        self.image_info_topic = "wall_camera/camera_info"
        self.point_topic = "/clicked_point"
        self.frame_id = "/map"
        self.occupancy = None  # the original line was an incomplete statement ("self.occupancy")

        self.pixel_x = 350
        self.pixel_y = 215

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.image_topic
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.image_cb, queue_size = 1)
        self.image_info_sub = rospy.Subscriber(self.image_info_topic, CameraInfo, self.image_info_cb, queue_size = 1)
        self.point_sub = rospy.Subscriber(self.point_topic, PointStamped, self.point_cb, queue_size = 4)

        self.points = []
        self.ready_for_image = False
        self.has_pic = False
        self.camera_info_msg = None

        self.tf_listener = tf.TransformListener()

        # self.display_picture()
        self.camera_model = PinholeCameraModel()
        

    def point_cb(self, point_msg):
        if len(self.points) < 4:
            self.points.append(point_msg.point)

        if len(self.points) == 1:
            self.ready_for_image = True 
            print "Point stored from click: ", point_msg.point


    def image_cb(self, img_msg):
        if self.ready_for_image:
            # Use cv_bridge() to convert the ROS image to OpenCV format
            try:
                frame = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
            except CvBridgeError, e:
                print e
            
            # Convert the image to a Numpy array since most cv2 functions
            # require Numpy arrays.
            frame = np.array(frame, dtype=np.uint8)
            print "Got array"
            
            # Process the frame using the process_image() function
            #display_image = self.process_image(frame)
                           
            # Display the image.
            self.img = frame 
            # self.has_pic = True
            

            if self.camera_info_msg is not None:
                print "cx ", self.camera_model.cx()
                print "cy ", self.camera_model.cy()
                print "fx ", self.camera_model.fx()
                print "fy ", self.camera_model.fy()

                # Get the point from the clicked message and transform it into the camera frame                
                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                point_in_camera_frame = tuple(np.dot(tf_mat, np.array([self.points[0].x, self.points[0].y, self.points[0].z, 1.0])))[:3]
                # Get the pixels related to the clicked point (the point must be in the camera frame)
                pixel_coords = self.camera_model.project3dToPixel(point_in_camera_frame)
                print "Pixel coords ", pixel_coords

                # Get the unit vector related to the pixel coords 
                unit_vector = self.camera_model.projectPixelTo3dRay((int(pixel_coords[0]), int(pixel_coords[1])))
                print "Unit vector ", unit_vector

                # TF the unit vector of the pixel to the frame of the clicked point from the map frame
                self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                xyz = tuple(np.dot(tf_mat, np.array([unit_vector[0], unit_vector[1], unit_vector[2], 1.0])))[:3]

                print "World xyz ", xyz 

                red = (0,125,255)
                cv2.circle(self.img, (int(pixel_coords[0]), int(pixel_coords[1])), 30, red)
                cv2.imshow(self.image_topic, self.img)


            self.ready_for_image = False
            self.points = []
            
            # Process any keyboard commands
            self.keystroke = cv.WaitKey(5)
            if 32 <= self.keystroke and self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'q':
                    # The user pressed the q key, so exit
                    rospy.signal_shutdown("User hit q key to quit.")
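
image_cb above chains two operations: transform the clicked map point into the camera frame with the 4x4 matrix from fromTranslationRotation, then project it through the pinhole model. A minimal offline sketch with assumed transform and point values; camera_model is presumed already initialised from a CameraInfo message, so the projection call is left commented:

import numpy as np
import tf

trans = (0.0, 0.0, 1.0)                    # assumed camera position in the map frame
rot = (0.0, 0.0, 0.0, 1.0)                 # assumed identity orientation (quaternion)
clicked = np.array([1.0, 0.5, 0.0, 1.0])   # assumed clicked map point, homogeneous

tf_mat = tf.TransformerROS().fromTranslationRotation(trans, rot)
point_in_camera = np.dot(tf_mat, clicked)[:3]
# u, v = camera_model.project3dToPixel(tuple(point_in_camera))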
Example #14
0
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces
        global brojach
        global avgx
        global avgy
        global avgr
        global flag
        if flag == 0:
            if brojach < 10:
                avgx = avgx + detection.pose.position.x
                avgy = avgy + detection.pose.position.y
                avgr = avgr + detection.pose.position.z
                brojach = brojach + 1
                return
            if brojach == 10:
                avgx = avgx / 10
                avgy = avgy / 10
                avgr = avgr / 10
                # camera_model was used before being defined in the original;
                # build it from the best-matching CameraInfo, as the other
                # callbacks in this file do
                camera_info = None
                best_time = 100
                for ci in self.camera_infos:
                    time = abs(ci.header.stamp.to_sec() -
                               detection.header.stamp.to_sec())
                    if time < best_time:
                        camera_info = ci
                        best_time = time
                if not camera_info or best_time > 1:
                    return
                camera_model = PinholeCameraModel()
                camera_model.fromCameraInfo(camera_info)
                p = Point(((avgx - camera_model.cx()) - camera_model.Tx()) /
                          camera_model.fx(),
                          ((avgy - camera_model.cy()) - camera_model.Ty()) /
                          camera_model.fy(), avgr)
                lp = self.localize(detection.header, p, self.region_scope)
                self.tf.waitForTransform(detection.header.frame_id, "map",
                                         rospy.Time.now(), rospy.Duration(5.0))
                mp = geometry_msgs.msg.PoseStamped()
                mp.header.frame_id = detection.header.frame_id
                mp.pose = lp.pose
                mp.header.stamp = detection.header.stamp
                tp = self.tf.transformPose("map", mp)
                if lp.pose.position.x != 0:
                    # robotmap was used before being defined in the original;
                    # recover the robot's pose in the map frame first
                    now2 = rospy.Time.now()
                    self.tf2.waitForTransform("base_link", "map", now2,
                                              rospy.Duration(5.0))
                    robot = geometry_msgs.msg.PoseStamped()
                    robot.header.frame_id = "base_link"
                    robot.header.stamp = now2
                    robotmap = self.tf2.transformPose("map", robot)
                    dp = self.distance(robotmap, tp)
                    if dp > 1.5:
                        tp.pose.position.z = -1
                    #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                    #	tp.pose.position.z = -1
                    #if (point1.z > 0.5 or point1.z < 0.3):
                    #	tp.pose.position.z = -1
                    #if (point2.z > 0.5 or point2.z < 0.3): # height
                    #	tp.pose.position.z = -1
                else:
                    tp.pose.position.z = -1
                self.pub_avg_ring.publish(tp)
                flag = 1
                return

        if flag == 1:
            u = detection.pose.position.x
            #u = avgx+avgr
            v = detection.pose.position.y + detection.pose.position.z
            #v = avgy

            w = detection.pose.position.x
            #w = avgx-avgr
            q = detection.pose.position.y - detection.pose.position.z

            g = detection.pose.position.x
            #w = avgx-avgr
            h = detection.pose.position.y + detection.pose.position.z + 4

            r = detection.pose.position.x
            #w = avgx-avgr
            t = detection.pose.position.y + detection.pose.position.z - 4
            #q = avgy

            camera_info = None
            best_time = 100
            for ci in self.camera_infos:
                time = abs(ci.header.stamp.to_sec() -
                           detection.header.stamp.to_sec())
                if time < best_time:
                    camera_info = ci
                    best_time = time

            if not camera_info or best_time > 1:
                print("Best time is higher than 1")
                return

            camera_model = PinholeCameraModel()
            camera_model.fromCameraInfo(camera_info)

            point1 = Point(((u - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((v - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point2 = Point(((w - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((q - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point3 = Point(((g - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((h - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point4 = Point(((r - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((t - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            localization1 = self.localize(detection.header, point1,
                                          self.region_scope)
            localization2 = self.localize(detection.header, point2,
                                          self.region_scope)
            localization3 = self.localize(detection.header, point3,
                                          self.region_scope)
            localization4 = self.localize(detection.header, point4,
                                          self.region_scope)
            if not localization1:
                return

            # calculate center
            center = Point(localization1.pose.position.x,
                           localization1.pose.position.y,
                           localization1.pose.position.z)
            now = detection.header.stamp
            self.tf.waitForTransform(detection.header.frame_id, "map", now,
                                     rospy.Duration(5.0))

            m = geometry_msgs.msg.PoseStamped()
            m.header.frame_id = detection.header.frame_id
            #m.pose.position.x = center.x
            #m.pose.position.y = center.y
            m.pose = localization1.pose
            m.header.stamp = detection.header.stamp
            transformedPoint1 = self.tf.transformPose("map", m)

            m2 = geometry_msgs.msg.PoseStamped()
            m2.header.frame_id = detection.header.frame_id
            m2.pose = localization2.pose
            m2.header.stamp = detection.header.stamp
            transformedPoint2 = self.tf.transformPose("map", m2)

            m3 = geometry_msgs.msg.PoseStamped()
            m3.header.frame_id = detection.header.frame_id
            m3.pose = localization3.pose
            m3.header.stamp = detection.header.stamp
            transformedPoint3 = self.tf.transformPose("map", m3)

            m4 = geometry_msgs.msg.PoseStamped()
            m4.header.frame_id = detection.header.frame_id
            m4.pose = localization4.pose
            m4.header.stamp = detection.header.stamp
            transformedPoint4 = self.tf.transformPose("map", m4)

            now2 = rospy.Time.now()
            self.tf2.waitForTransform("base_link", "map", now2,
                                      rospy.Duration(5.0))

            robot = geometry_msgs.msg.PoseStamped()
            robot.header.frame_id = "base_link"
            robot.pose.position.x = 0
            robot.pose.position.y = 0
            #robot.pose = localization1.pose
            robot.header.stamp = now2
            robotmap = self.tf2.transformPose("map", robot)

            #dist = self.distance(robotmap,transformedPoint)
            transformedPoint = transformedPoint1
            if localization1.pose.position.x != 0:
                dist1 = self.distance(robotmap, transformedPoint1)
            else:
                dist1 = 100000
            if localization2.pose.position.x != 0:
                dist2 = self.distance(robotmap, transformedPoint2)
            else:
                dist2 = 100000
            if localization3.pose.position.x != 0:
                dist3 = self.distance(robotmap, transformedPoint3)
            else:
                dist3 = 100000
            if localization4.pose.position.x != 0:
                dist4 = self.distance(robotmap, transformedPoint4)
            else:
                dist4 = 100000
            # find smallest distance to a point and keep the matching pose
            # (the original overwrote transformedPoint with the last valid
            # localization instead of the nearest one)
            dist = dist1
            if dist2 < dist:
                dist = dist2
                transformedPoint = transformedPoint2
            if dist3 < dist:
                dist = dist3
                transformedPoint = transformedPoint3
            if dist4 < dist:
                dist = dist4
                transformedPoint = transformedPoint4

            print("distance: ", dist)
            if (dist < 1.5):
                rad = abs((point1.y - point2.y) / 2)
                print("radius: ", rad)
                print("height p1: ", point1.z)
                print("height p2: ", point2.z)
                #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                #	return
                #if (point1.z > 0.5 or point1.z < 0.3):
                #	return
                #if (point2.z > 0.5 or point2.z < 0.3): # height
                #	return
                self.pub.publish(transformedPoint)
                print("start checking")
                print(len(detected))
                beenDetected = False
                for p in detected:
                    if self.distance(p, transformedPoint) <= maxDistance:
                        print("Already detected ring!")
                        beenDetected = True
                        break

                if (beenDetected == False):
                    # the original only appended the first ring, so the
                    # numFaces check below could never trigger; append every
                    # newly accepted ring instead
                    detected.append(transformedPoint)
                    print("Publishing new ring")
                    self.pub_ring.publish(transformedPoint)
                    marker = Marker()
                    marker.header.stamp = detection.header.stamp
                    marker.header.frame_id = transformedPoint.header.frame_id
                    marker.pose.position.x = transformedPoint.pose.position.x
                    marker.pose.position.y = transformedPoint.pose.position.y
                    marker.pose.position.z = transformedPoint.pose.position.z
                    marker.type = Marker.CUBE
                    marker.action = Marker.ADD
                    marker.frame_locked = False
                    marker.lifetime = rospy.Duration.from_sec(1)
                    marker.id = self.marker_id_counter
                    marker.scale = Vector3(0.1, 0.1, 0.1)
                    marker.color = ColorRGBA(1, 0, 0, 1)
                    self.markers.markers.append(marker)
                    self.marker_id_counter += 1
                    print("Number of detected rings: ", len(detected))
                    if len(detected) == numFaces:
                        print("Sending shutdown")
            else:
                print("Just zeros")
class Realsense:
    """
    Class that uses a RealSense camera for hand detection
    """
    def __init__(self):
        """
        Constructor
        """
        rospy.init_node("ros_yolo", anonymous=True, disable_signals=False)

        self.bridge = CvBridge()
        self.rgb_lock = threading.Lock()
        self.depth_lock = threading.Lock()

        self.cameraModel = PinholeCameraModel()
        self.cameraInfo = CameraInfo()

        self.sub_color = rospy.Subscriber("/camera/color/image_raw",
                                          Image,
                                          self.color_callback,
                                          queue_size=1)

        self.sub_depth = rospy.Subscriber(
            "/camera/aligned_depth_to_color/image_raw",
            Image,
            self.depth_callback,
            queue_size=1)
        self.sub_camera_info = rospy.Subscriber("/camera/depth/camera_info",
                                                CameraInfo,
                                                self.camera_info_callback,
                                                queue_size=1)

        self.target_pub = rospy.Publisher("/hand_detection/target_camera",
                                          PoseStamped, queue_size=1)

        self.color_img = np.zeros((480, 640, 3), np.uint8)
        self.depth_img = np.zeros((480, 640), np.uint8)
        self.img_width = 640
        self.img_height = 480
        self.hand_counter = 0
        self.not_hand_counter = 0
        self.hand_3D_pos = np.empty((0, 3), int)

    def _random_centered_pixels(self, box_x, box_y, box_w, box_h, radius,
                                samples):
        """
        Returns an array of pixel positions in 2D
        """
        pixels = []
        center_x = int(box_w / 2)
        center_y = int(box_h / 2)
        x = box_x + center_x - radius
        y = box_y + center_y - radius
        samples_limit = radius * radius

        if samples < samples_limit:
            random_positions = random.sample(range(0, samples_limit), samples)
            for sample in random_positions:
                pixel_x = x + (sample % radius)
                if pixel_x > (self.img_width / 2 - 1):
                    pixel_x = self.img_width / 2 - 1
                pixel_y = y + int(sample / radius)
                if pixel_y > (self.img_height / 2 - 1):
                    pixel_y = self.img_height / 2 - 1
                pixels.append([pixel_x, pixel_y])
        else:
            print("The samples number should be minor" + " than radius*radius")
        return pixels

    def _load_model(self, custom=True):
        if custom:
            model_def_path = "config/yolov3-custom-hand-arm.cfg"
            weights_path = "checkpoints-arm-hand/yolov3_ckpt_54.pth"
            class_path = "data/custom/classes.names"
        else:
            model_def_path = "config/yolov3.cfg"
            weights_path = "weights/yolov3.weights"
            class_path = "data/coco.names"

        self.img_size = 416

        # Object confidence threshold
        self.conf_thres = 0.95
        # IOU thresshold for non-maximum suppression
        self.nms_thres = 0.05

        if torch.cuda.is_available():
            print("---------GPU AVAILABLE---------")
            self.Tensor = torch.cuda.FloatTensor
        else:
            print("---------CPU AVAILABLE---------")
            self.Tensor = torch.FloatTensor

        # Create the model
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = Darknet(model_def_path, img_size=416).to(device)

        # Load the custom weights
        if custom:
            self.model.load_state_dict(torch.load(weights_path))
        else:
            self.model.load_darknet_weights(weights_path)

        self.model.eval()

        self.classes = load_classes(class_path)

        cmap = plt.get_cmap("tab20b")
        colors = [cmap(i) for i in np.linspace(0, 1, 10000)]
        self.bbox_colors = random.sample(colors, len(self.classes))

    def _infer(self):
        with self.rgb_lock:
            frame = self.color_img.copy()
            # frame = self.color_img
        # Display only purpose
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = ImagePIL.fromarray(img)  # use the RGB-converted frame, not the raw BGR one
        ratio = min(self.img_size / img.size[0], self.img_size / img.size[1])
        imw = round(img.size[0] * ratio)
        imh = round(img.size[1] * ratio)
        img_transforms = transforms.Compose([
            transforms.Resize((imh, imw)),
            transforms.Pad((max(int(
                (imh - imw) / 2), 0), max(int(
                    (imw - imh) / 2), 0), max(int(
                        (imh - imw) / 2), 0), max(int((imw - imh) / 2), 0)),
                           (128, 128, 128)),
            transforms.ToTensor(),
        ])

        image_tensor = img_transforms(img).float()
        image_tensor = image_tensor.unsqueeze_(0)
        input_img = Variable(image_tensor.type(self.Tensor))

        with torch.no_grad():
            detections = self.model(input_img)
            detections = non_max_suppression(detections, self.conf_thres,
                                             self.nms_thres)

        if detections[0] is not None:

            unit_scaling = 0.001
            center_x = self.cameraModel.cx()
            center_y = self.cameraModel.cy()
            constant_x = unit_scaling / self.cameraModel.fx()
            constant_y = unit_scaling / self.cameraModel.fy()

            # Rescale boxes to original image
            detections = rescale_boxes(detections[0], self.img_size,
                                       frame.shape[:2])

            unique_labels = detections[:, -1].cpu().unique()

            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                box_w = x2 - x1
                box_h = y2 - y1
                w_proportion = box_w / img.size[0]
                h_proportion = box_h / img.size[1]
                if ((self.classes[int(cls_pred)] == 'Human hand'
                     or self.classes[int(cls_pred)] == 'Human arm')
                        and w_proportion >= 0.1 and w_proportion <= 0.90
                        and h_proportion >= 0.1 and h_proportion <= 1.0
                        and box_w > 10 and box_h > 10):

                    color = self.bbox_colors[int(
                        np.where(unique_labels == int(cls_pred))[0])]

                    frame = cv2.rectangle(
                        frame, (x1, y1), (x2, y2),
                        (color[0] * 255, color[1] * 255, color[2] * 255), 2)

                    cv2.putText(frame, self.classes[int(cls_pred)], (x1, y1),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2, cv2.LINE_AA)

                    radius = 5
                    samples = 10
                    pixels = self._random_centered_pixels(
                        float(x1), float(y1), box_w, box_h, radius, samples)
                    depthPoint = []
                    arrayPoints = np.empty((0, 3), int)

                    for i in range(0, samples - 1):
                        x = int(pixels[i][0])
                        y = int(pixels[i][1])
                        depthPoint = np.array([
                            (x - center_x / 2) *
                            self.depth_img[y, x] * constant_x,
                            (y - center_y / 2) * self.depth_img[y, x] *
                            constant_y, self.depth_img[y, x] * unit_scaling
                        ])
                        if depthPoint.sum() > 0:
                            arrayPoints = np.append(arrayPoints, [depthPoint],
                                                    axis=0)

                    if len(arrayPoints) > 0:
                        mean_x = np.mean(arrayPoints[:, 0], axis=0)
                        mean_y = np.mean(arrayPoints[:, 1], axis=0)
                        mean_z = np.mean(arrayPoints[:, 2], axis=0)

                        pos_3D = np.array([mean_x, mean_y, mean_z])
                        if self.hand_counter == 0:
                            self.hand_3D_pos = pos_3D
                            self.hand_counter = 1
                        elif mean_z > 0.25 and mean_z < 1.20:
                            dist = np.linalg.norm(self.hand_3D_pos - pos_3D)
                            if ((dist < 0.10)
                                    or (pos_3D[2] < self.hand_3D_pos[2])):
                                self.hand_3D_pos = pos_3D
                                self.hand_counter = self.hand_counter + 1

                                if self.hand_counter > 2:
                                    # if self.hand_counter == 3:
                                    #    cv2.imwrite('hand.jpg', frame)

                                    header = std_msgs.msg.Header()
                                    header.stamp = rospy.Time.now()
                                    header.frame_id = 'RealSense_optical_frame'
                                    # publish
                                    p = PoseStamped()
                                    p.header.frame_id = 'target_camera'
                                    p.header.stamp = rospy.Time.now()

                                    p.pose.position.x = mean_x
                                    p.pose.position.y = mean_y
                                    p.pose.position.z = mean_z
                                    self.target_pub.publish(p)
                                    break
                            else:
                                self.not_hand_counter = self.not_hand_counter + 1

            if self.not_hand_counter > 20:
                self.hand_counter = 0
                self.not_hand_counter = 0

        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    def color_callback(self, data):
        """
        Callback function for color image subscriber
        """
        try:
            with self.rgb_lock:
                cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
                self.color_img = cv_image
        except CvBridgeError as e:
            print(e)

    def depth_callback(self, data):
        """
        Callback function for depth image subscriber
        """
        try:
            with self.depth_lock:
                cv_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
                self.depth_img = np.array(cv_image, dtype=np.float32)
        except CvBridgeError as e:
            print(e)

    def camera_info_callback(self, data):
        """
        Callback function for camera info subscriber
        """
        try:
            self.cameraModel.fromCameraInfo(data)
            self.cameraInfo = data
        except Exception as e:  # `Error` was undefined in the original
            print(e)
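
The depth back-projection in _infer follows the usual depth_image_proc convention: X = (u - cx) * z / fx and Y = (v - cy) * z / fy, with raw millimetre depths scaled by 0.001 (the snippet above additionally halves cx and cy, presumably to match its resized stream). A worked example with assumed intrinsics and depth:

unit_scaling = 0.001                           # millimetres -> metres
fx, fy, cx, cy = 615.0, 615.0, 320.0, 240.0    # assumed intrinsics
u, v, depth_mm = 400, 260, 850                 # assumed pixel and raw depth
X = (u - cx) * depth_mm * unit_scaling / fx
Y = (v - cy) * depth_mm * unit_scaling / fy
Z = depth_mm * unit_scaling
print(X, Y, Z)   # optical-frame point in metres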
Example #16
0
class Scanner:
    def __init__(self):
        self.pixels_per_scanline = rospy.get_param('~pixels_per_scanline')
        self.scanner_info_file_name = rospy.get_param('~scanner_info_file_name')
        self.threshold = rospy.get_param('~threshold')        

        self.mutex = threading.RLock()
        self.image_update_flag = threading.Event()
        self.bridge = CvBridge()
        rospy.Subscriber("image_stream", sensor_msgs.msg.Image, self.update_image)
        rospy.loginfo("Waiting for camera info...")
        self.camera_info = rospy.wait_for_message('camera_info', sensor_msgs.msg.CameraInfo)
        rospy.loginfo("Camera info received.")

        rospy.loginfo("Waiting for projector info service...")
        rospy.wait_for_service('get_projector_info')
        rospy.loginfo("Projector info service found.")
        get_projector_info = rospy.ServiceProxy('get_projector_info', projector.srv.GetProjectorInfo)
        self.projector_info = get_projector_info().projector_info

        self.projector_model = PinholeCameraModel()
        self.projector_model.fromCameraInfo(self.projector_info)

        self.read_scanner_info()

        self.projector_to_camera_rotation_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector, self.projector_to_camera_rotation_matrix)        

        predistortmap_x = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_32FC1)
        predistortmap_y = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_32FC1)
        InitPredistortMap(self.projector_model.intrinsicMatrix(), self.projector_model.distortionCoeffs(), predistortmap_x, predistortmap_y)

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_x)
        cv.AddS(predistortmap_x, -minVal, predistortmap_x)
        uncropped_projection_width = int(math.ceil(maxVal - minVal))
        self.center_pixel = self.projector_model.cx() - minVal

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_y)
        cv.AddS(predistortmap_y, -minVal, predistortmap_y)
        uncropped_projection_height = int(math.ceil(maxVal - minVal))

        self.number_of_scanlines = int(math.ceil(float(uncropped_projection_width)/self.pixels_per_scanline))

        rospy.loginfo("Generating projection patterns...")

        graycodes = []
        for i in range(self.number_of_scanlines):
            graycodes.append(graycodemath.generate_gray_code(i))
    
        self.number_of_projection_patterns = int(math.ceil(math.log(self.number_of_scanlines, 2)))        
        self.predistorted_positive_projections = []
        self.predistorted_negative_projections = []

        for i in range(self.number_of_projection_patterns):
            cross_section = cv.CreateMat(1, uncropped_projection_width, cv.CV_8UC1)

            #Fill in cross section with the associated bit of each gray code
            for pixel in range(uncropped_projection_width):
                scanline = pixel // self.pixels_per_scanline
                scanline_value = graycodemath.get_bit(graycodes[scanline], i) * 255
                cross_section[0, pixel] = scanline_value

            #Repeat the cross section over the entire image
            positive_projection = cv.CreateMat(uncropped_projection_height, uncropped_projection_width, cv.CV_8UC1)
            cv.Repeat(cross_section, positive_projection)

            #Predistort the projections to compensate for projector optics so that the scanlines are approximately planar
            predistorted_positive_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
            cv.Remap(positive_projection, predistorted_positive_projection, predistortmap_x, predistortmap_y, flags=cv.CV_INTER_NN)

            #Create a negative of the pattern for thresholding
            predistorted_negative_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
            cv.Not(predistorted_positive_projection, predistorted_negative_projection)
                
            #Fade the borders of the patterns to avoid artifacts at the edges of projection
            predistorted_positive_projection_faded = fade_edges(predistorted_positive_projection, 20)
            predistorted_negative_projection_faded = fade_edges(predistorted_negative_projection, 20)
                
            self.predistorted_positive_projections.append(predistorted_positive_projection_faded)
            self.predistorted_negative_projections.append(predistorted_negative_projection_faded)

        rospy.loginfo("Waiting for projection setting service...")
        rospy.wait_for_service('set_projection')
        rospy.loginfo("Projection setting service found.")
        self.set_projection = rospy.ServiceProxy('set_projection', projector.srv.SetProjection)

        self.pixel_associations_msg = None
        self.point_cloud_msg = None

        rospy.Service("~get_point_cloud", graycode_scanner.srv.GetPointCloud, self.handle_point_cloud_srv)

        point_cloud_pub = rospy.Publisher('~point_cloud', sensor_msgs.msg.PointCloud)

        rospy.loginfo("Ready.")

        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.point_cloud_msg is not None:
                point_cloud_pub.publish(self.point_cloud_msg)
            rate.sleep()

    def read_scanner_info(self):
        try:
            input_stream = file(self.scanner_info_file_name, 'r')
        except IOError:
            rospy.loginfo('Specified scanner info file at ' + self.scanner_info_file_name + ' does not exist.')
            return
        info_dict = yaml.load(input_stream)
        try:
            self.projector_to_camera_translation_vector = tuple(info_dict['projector_to_camera_translation_vector'])
            self.projector_to_camera_rotation_vector = list_to_matrix(info_dict['projector_to_camera_rotation_vector'], 3, 1, cv.CV_32FC1)
        except (TypeError, KeyError):
            rospy.loginfo('Scanner info file at ' + self.scanner_info_file_name + ' is in an unrecognized format.')
            return
        rospy.loginfo('Scanner info successfully read from ' + self.scanner_info_file_name)

    def update_image(self, imagemsg):
        with self.mutex:
            if not self.image_update_flag.is_set():
                self.latest_image = self.bridge.imgmsg_to_cv(imagemsg, "mono8")
                self.image_update_flag.set()
    
    def get_next_image(self):
        with self.mutex:
            self.image_update_flag.clear()
        self.image_update_flag.wait()
        with self.mutex:
            return self.latest_image
                            
    def get_picture_of_projection(self, projection):
        image_message = self.bridge.cv_to_imgmsg(projection, encoding="mono8")
        self.set_projection(image_message)
        return self.get_next_image()
    
    def get_projector_line_associations(self):
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        for i in range(self.number_of_projection_patterns):
            positives.append(self.get_picture_of_projection(self.predistorted_positive_projections[i]))
            negatives.append(self.get_picture_of_projection(self.predistorted_negative_projections[i]))
        
            
        rospy.loginfo("Thresholding...")
        strike_sum = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        gray_codes = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask, cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)
            
            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2 ** i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        projector_line_associations = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
        for i in range(4, -1, -1):  # right-shift amounts 2**i = 16, 8, 4, 2, 1 cover 32-bit codes
            projector_line_associations_bitshifted_right = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations, projector_line_associations_bitshifted_right, (2 ** -(2 ** i)), -0.5)
            cv.Xor(projector_line_associations, projector_line_associations_bitshifted_right, projector_line_associations)
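        # The loop above is the matrix form of the standard Gray-to-binary
        # fold; a scalar sketch of the same decode (assuming codes fit in
        # 32 bits, which CV_32SC1 guarantees here):
        #
        #   def gray_to_binary(g):
        #       for shift in (16, 8, 4, 2, 1):
        #           g ^= g >> shift
        #       return g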
        
        rospy.loginfo("Post processing...")
        
        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)
        
        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
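        # InRangeS marks the pixels whose code lies in [0, number_of_scanlines),
        # i.e. the valid ones; Not() then flips that into the invalid mask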
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines, invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)
        
        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations

    def handle_point_cloud_srv(self, req):
        response = graycode_scanner.srv.GetPointCloudResponse()
        self.get_point_cloud()
        response.point_cloud = self.point_cloud_msg
        response.success = True
        return response

    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection, encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)
        
        valid_points_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)
        
        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1
       
        cv.UndistortPoints(rectified_camera_coordinates, rectified_camera_coordinates, camera_model.intrinsicMatrix(), camera_model.distortionCoeffs())
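        # With no rectification or new projection matrix supplied, UndistortPoints
        # writes back normalized, undistorted coordinates (intrinsics and lens
        # distortion removed), which is what the ray construction below expects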

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates, camera_model)
        
        intersection_points = ray_plane_intersections(camera_rays, planes)
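        # A scalar sketch of what ray_plane_intersections computes (the helper
        # itself is defined elsewhere in this example): each plane is packed as
        # (n, d) with d = n . p0, p0 being the projector origin in camera
        # coordinates, and each camera ray r passes through the camera origin,
        # so per point:
        #
        #   t = d / (n . r)   # ray parameter at the plane
        #   x = t * r         # 3D intersection point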
        
        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
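        # "intensity" is the channel name visualizers such as RViz
        # conventionally use to color PointCloud messages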
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
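                # Debug: a negative depth means this pixel decoded to a plane
                # behind the camera, i.e. a bad correspondence or calibration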
                print scanline_associations[0, i]
                print rectified_camera_coordinates[0, i]
                
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)
     
        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg

    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z, normal_vectors_z, scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z, normal_vectors_z, scale=1.0/self.projector_model.fx())
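        # In projector coordinates the plane of scanline s contains the
        # projector origin and the scanline direction, so its normal is
        # (-1, 0, x_n), where x_n = (s * pixels_per_scanline - center_pixel) / fx
        # is the scanline's normalized image coordinate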
        
        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None, normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors, self.projector_to_camera_rotation_matrix)
        
        normal_vectors_split = [None]*3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0], normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
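        # Accumulate the plane offset d = n . t, where t is the
        # projector-to-camera translation (the projector origin expressed
        # in camera coordinates)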
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i], self.projector_to_camera_translation_vector[i], n_dot_p, n_dot_p)

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1], normal_vectors_split[2], n_dot_p, planes)

        return planes

    def display_scanline_associations(self, associations):
        display_image = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.ConvertScale(associations, display_image, 255.0/self.number_of_scanlines)
        cv.NamedWindow("associations", flags=0)
        cv.ShowImage("associations", display_image)
        cv.WaitKey(800)
Example #17
def main(args):
    global image
    rospy.init_node('skeleton_viewer', anonymous=True)

    image_info_sub = rospy.Subscriber("/camera/rgb/camera_info", CameraInfo,
                                      image_info_cb)

    # Init the Kinect object
    kin = Kinect()

    ic = image_converter()

    cam_model = PinholeCameraModel()
    while (not camera_info) and (not rospy.is_shutdown()):
        time.sleep(1)
        print("waiting for camera info")

    cam_model.fromCameraInfo(camera_info)
    print(cam_model.cx(), cam_model.cy())

    # font
    font = cv2.FONT_HERSHEY_SIMPLEX

    # fontScale
    fontScale = 1

    # Blue color in BGR
    color = (255, 0, 0)

    # Line thickness of 2 px
    thickness = 2

    rate = rospy.Rate(1)  # 1hz
    while not rospy.is_shutdown():
        try:
            frames, trans = kin.get_posture()
            disp_image = image
            i = 0
            for frame in frames:
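                # project3dToPixel maps a 3D point in the camera's optical
                # frame to (u, v) pixel coordinates using the model intrinsics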
                coords = cam_model.project3dToPixel(frame)
                disp_image = cv2.circle(disp_image,
                                        (int(coords[0]), int(coords[1])),
                                        radius=10,
                                        color=(0, 0, 255),
                                        thickness=-1)
                disp_image = cv2.putText(disp_image, FRAMES[i],
                                         (int(coords[0]), int(coords[1])),
                                         font, fontScale, color, thickness,
                                         cv2.LINE_AA)
                i += 1
            #coords = cam_model.project3dToPixel(frame)
            disp_image = cv2.circle(disp_image,
                                    (int(cam_model.cx()), int(cam_model.cy())),
                                    radius=5,
                                    color=(0, 255, 0),
                                    thickness=-1)
            cv2.imshow("Image window", disp_image)
            cv2.waitKey(1)
            #rate.sleep()
        except tf2_ros.TransformException:
            print("user not found error")