import cv2
from image_geometry import PinholeCameraModel
from sensor_msgs.msg import CameraInfo


def remap(raw_img, camera_info: CameraInfo):
    cam = PinholeCameraModel()
    cam.fromCameraInfo(camera_info)
    map_x, map_y = cv2.initUndistortRectifyMap(
        cam.intrinsicMatrix(),
        cam.distortionCoeffs(),
        cam.R,
        cam.projectionMatrix(),
        (cam.width, cam.height),
        cv2.CV_32FC1,
    )
    rectified_image = cv2.remap(raw_img, map_x, map_y, cv2.INTER_CUBIC)

    return rectified_image
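
A minimal usage sketch for the rectification helper above, assuming a running ROS
master and a camera driver; the topic names are placeholders:

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CameraInfo, Image

rospy.init_node("rectify_example")
bridge = CvBridge()
info_msg = rospy.wait_for_message("/camera/camera_info", CameraInfo)  # placeholder topic
image_msg = rospy.wait_for_message("/camera/image_raw", Image)        # placeholder topic
raw = bridge.imgmsg_to_cv2(image_msg, "bgr8")
rectified = remap(raw, info_msg)
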
Example No. 2
class MyNode(DTROS):
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(MyNode, self).__init__(node_name=node_name)

        # construct publisher and subscriber
        self.pub = rospy.Publisher('chatter', String, queue_size=10)
        self.sub_image = rospy.Subscriber(
            "/duckiesam/camera_node/image/compressed",
            CompressedImage,
            self.find_marker,
            buff_size=921600,
            queue_size=1)
        self.pub_image = rospy.Publisher('~image', Image, queue_size=1)
        self.sub_info = rospy.Subscriber("/duckiesam/camera_node/camera_info",
                                         CameraInfo,
                                         self.get_camera_info,
                                         queue_size=1)
        self.pub_move = rospy.Publisher('/duckiesam/joy_mapper_node/car_cmd',
                                        Twist2DStamped,
                                        queue_size=1)

        #values for detecting marker
        self.starting = 0
        self.ending = 0
        self.camerainfo = PinholeCameraModel()
        self.bridge = CvBridge()
        self.gotimage = False
        self.imagelast = None
        self.processedImg = None
        self.detected = False

        #values for calculating pose of robot
        self.solP = False
        self.rotationvector = None
        self.translationvector = None
        self.axis = np.float32([[0.0125, 0, 0], [0, 0.0125, 0],
                                [0, 0, -0.0375]]).reshape(-1, 3)
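        # Endpoints of the three axis arms drawn on the image later in gradient():
        # 0.0125 m along x and y, and 0.0375 m along -z (out of the grid plane).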
        self.distance = None
        self.angle_f = None
        self.angle_l = None

        #values for driving the robot
        self.maxdistance = 0.2
        self.speedN = 0
        self.e_vB = 0
        self.rotationN = 0
        self.mindistance = 0.1
        self.d_e = 0  #distance error
        self.d_e_1 = 5
        self.d_e_2 = 10
        self.y2 = 0
        self.controltime = rospy.Time.now()
        self.Kp = 1
        self.Ki = 0.1
        self.Kd = 0
        self.I = 0
        self.Rp = 1
        self.Ri = 1
        self.Rd = 1

    #get camera info for pinhole camera model
    def get_camera_info(self, camera_msg):
        self.camerainfo.fromCameraInfo(camera_msg)

    #step 1 : find the back circle grids using cv2.findCirclesGrid
    ##### set (x,y) for points and flag for detection
    def find_marker(self, image_msg):
        try:
            self.imagelast = self.bridge.compressed_imgmsg_to_cv2(
                image_msg, "bgr8")
        except CvBridgeError as e:
            print(e)
        if self.gotimage == False:
            self.gotimage = True

        #time checking
        self.starting = rospy.Time.now()
        #from_last_image = (self.starting - self.ending).to_sec()

        gray = cv2.cvtColor(self.imagelast, cv2.COLOR_BGR2GRAY)

        detection, corners = cv2.findCirclesGrid(gray, (7, 3))

        self.processedImg = self.imagelast.copy()
        cmd = Twist2DStamped()
        cmd.header.stamp = self.starting

        if detection:
            cv2.drawChessboardCorners(self.processedImg, (7, 3), corners,
                                      detection)
            self.detected = True
            #self.controltime = rospy.Time.now()
            twoone = []
            for i in range(0, 21):
                point = [corners[i][0][0], corners[i][0][1]]
                twoone.append(point)
            twoone = np.array(twoone)

            self.originalmatrix()
            self.gradient(twoone)
            self.detected = self.solP
            img_out = self.bridge.cv2_to_imgmsg(self.processedImg, "bgr8")
            self.pub_image.publish(img_out)
            self.find_distance()
            self.move(self.y2, self.angle_l, self.distance)
            self.ending = rospy.Time.now()
        else:
            self.detected = False
            img_out = self.bridge.cv2_to_imgmsg(gray, "bgr8")
            self.pub_image.publish(img_out)
            self.ending = rospy.Time.now()
            cmd.v = 0
            cmd.omega = 0
            self.pub_move.publish(cmd)

    #step 2 : make the matrix for the 3D original shape
    def originalmatrix(self):
        #corners and points
        self.originalmtx = np.zeros([21, 3])
        for i in range(0, 7):
            for j in range(0, 3):
                self.originalmtx[i + j * 7, 0] = 0.0125 * i - 0.0125 * 3
                self.originalmtx[i + j * 7, 1] = 0.0125 * j - 0.0125
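        # The resulting 21x3 array holds the circle-grid centres in metres, e.g.
        # row 0 -> [-0.0375, -0.0125, 0.0] and row 20 -> [0.0375, 0.0125, 0.0];
        # the z column stays zero because the grid is planar.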

    #step 3 : use intrinsic matrix and solvePnP, return rvec and tvec, print axis
    def gradient(self, imgpts):
        #use solvePnP to find the rotation and translation vectors, then project the 3D points onto the image plane
        self.solP, self.rotationvector, self.translationvector = cv2.solvePnP(
            self.originalmtx, imgpts, self.camerainfo.intrinsicMatrix(),
            self.camerainfo.distortionCoeffs())
        if self.solP:
            pointsin3D, jacoB = cv2.projectPoints(
                self.originalmtx, self.rotationvector, self.translationvector,
                self.camerainfo.intrinsicMatrix(),
                self.camerainfo.distortionCoeffs())
            pointaxis, _ = cv2.projectPoints(
                self.axis, self.rotationvector, self.translationvector,
                self.camerainfo.intrinsicMatrix(),
                self.camerainfo.distortionCoeffs())
            self.processedImg = cv2.line(self.processedImg,
                                         tuple(imgpts[10].ravel()),
                                         tuple(pointaxis[0].ravel()),
                                         (255, 0, 0), 2)
            self.processedImg = cv2.line(self.processedImg,
                                         tuple(imgpts[10].ravel()),
                                         tuple(pointaxis[1].ravel()),
                                         (0, 255, 0), 2)
            self.processedImg = cv2.line(self.processedImg,
                                         tuple(imgpts[10].ravel()),
                                         tuple(pointaxis[2].ravel()),
                                         (0, 0, 255), 3)

        #textdistance = "x = %s, y = %s, z = %s" % (self.distance, self.angle_f, self.angle_l, self.y2)
        #rospy.loginfo("%s" % textdistance)

    #step 4 : find the distance between the leader and the following robot, print out distance and time
    def find_distance(self):
        #use tvec to calculate distance
        tvx = self.translationvector[0]
        tvy = self.translationvector[1]
        tvz = self.translationvector[2]

        self.distance = math.sqrt(tvx * tvx + tvz * tvz)
        self.angle_f = np.arctan2(tvx[0], tvz[0])

        R, _ = cv2.Rodrigues(self.rotationvector)
        R_inverse = np.transpose(R)
        self.angle_l = np.arctan2(
            -R_inverse[2, 0],
            math.sqrt(R_inverse[2, 1]**2 + R_inverse[2, 2]**2))

        T = np.array([-np.sin(self.angle_l), np.cos(self.angle_l)])
        tvecW = -np.dot(R_inverse, self.translationvector)
        x_y = np.array([tvz[0], tvx[0]])

        self.y2 = -np.dot(T, x_y) - 0.01 * np.sin(self.angle_l)
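        # y2 approximates the follower's lateral offset from the leader's centre line
        # (the y_to input to move() below); the 0.01 m term shifts the reference point.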

        textdistance = "Distance = %s, Angle of Follower = %s, Angle of Leader = %s, y = %s" % (
            self.distance, self.angle_f, self.angle_l, self.y2)
        rospy.loginfo("%s" % textdistance)
        #self.pub.publish(textdistance)

    #step 5 : use joy mapper to control the robot with a PID controller
    def move(self, y_to, angle_to, d):
        #y_to is needed y value to be parallel to leader's center line
        #angle_to is angle needed to rotate
        #d is distance between required position and now position
        cmd = Twist2DStamped()

        time = rospy.Time.now()
        cmd.header.stamp = time
        dt = (time - self.controltime).to_sec()
        if dt > 3:
            if d < self.maxdistance:
                cmd.v = 0
                cmd.omega = 0
        else:
            self.d_e = d - self.maxdistance
            error_d = (self.d_e - self.d_e_1) / dt
            errorB = (self.d_e_1 - self.d_e_2) / dt

            e_v = error_d - errorB
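            # e_v compares the current and previous rates of change of the distance
            # error; the P, I and D terms below are computed on this quantity.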

            P = self.Kp * (e_v)
            self.I = self.I + self.Ki * (e_v + self.e_vB) / 2 * dt
            D = self.Kd * (e_v + self.e_vB) / dt

            self.speedN = P + self.I + D

            self.rotationN = self.Rp * (y_to) + self.Ri * (
                angle_to) + self.Rd * (np.sin(angle_to))

            cmd.v = self.speedN
            cmd.omega = self.rotationN

            self.e_vB = e_v
            self.d_e_2 = self.d_e_1
            self.d_e_1 = self.d_e
            if self.d_e < 0.05 or self.speedN < 0:
                cmd.v = 0
                cmd.omega = 0

        textdistance = "Velocity = %s, Rotation = %s" % (cmd.v, cmd.omega)
        rospy.loginfo("%s" % textdistance)
        self.pub_move.publish(cmd)
        self.controltime = time
class Scanner:
    def __init__(self):
        self.pixels_per_scanline = rospy.get_param('~pixels_per_scanline')
        self.scanner_info_file_name = rospy.get_param(
            '~scanner_info_file_name')
        self.threshold = rospy.get_param('~threshold')

        self.mutex = threading.RLock()
        self.image_update_flag = threading.Event()
        self.bridge = CvBridge()
        rospy.Subscriber("image_stream", sensor_msgs.msg.Image,
                         self.update_image)
        rospy.loginfo("Waiting for camera info...")
        self.camera_info = rospy.wait_for_message('camera_info',
                                                  sensor_msgs.msg.CameraInfo)
        rospy.loginfo("Camera info received.")

        rospy.loginfo("Waiting for projector info service...")
        rospy.wait_for_service('get_projector_info')
        rospy.loginfo("Projector info service found.")
        get_projector_info = rospy.ServiceProxy('get_projector_info',
                                                projector.srv.GetProjectorInfo)
        self.projector_info = get_projector_info().projector_info

        self.projector_model = PinholeCameraModel()
        self.projector_model.fromCameraInfo(self.projector_info)

        self.read_scanner_info()

        self.projector_to_camera_rotation_matrix = cv.CreateMat(
            3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      self.projector_to_camera_rotation_matrix)

        predistortmap_x = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        predistortmap_y = cv.CreateMat(self.projector_info.height,
                                       self.projector_info.width, cv.CV_32FC1)
        InitPredistortMap(self.projector_model.intrinsicMatrix(),
                          self.projector_model.distortionCoeffs(),
                          predistortmap_x, predistortmap_y)

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_x)
        cv.AddS(predistortmap_x, -minVal, predistortmap_x)
        uncropped_projection_width = int(math.ceil(maxVal - minVal))
        self.center_pixel = self.projector_model.cx() - minVal

        minVal, maxVal, minLoc, maxLoc = cv.MinMaxLoc(predistortmap_y)
        cv.AddS(predistortmap_y, -minVal, predistortmap_y)
        uncropped_projection_height = int(math.ceil(maxVal - minVal))

        self.number_of_scanlines = int(
            math.ceil(
                float(uncropped_projection_width) / self.pixels_per_scanline))

        rospy.loginfo("Generating projection patterns...")

        graycodes = []
        for i in range(self.number_of_scanlines):
            graycodes.append(graycodemath.generate_gray_code(i))

        self.number_of_projection_patterns = int(
            math.ceil(math.log(self.number_of_scanlines, 2)))
        self.predistorted_positive_projections = []
        self.predistorted_negative_projections = []

        for i in range(self.number_of_projection_patterns):
            cross_section = cv.CreateMat(1, uncropped_projection_width,
                                         cv.CV_8UC1)

            #Fill in cross section with the associated bit of each gray code
            for pixel in range(uncropped_projection_width):
                scanline = pixel // self.pixels_per_scanline
                scanline_value = graycodemath.get_bit(graycodes[scanline],
                                                      i) * 255
                cross_section[0, pixel] = scanline_value

            #Repeat the cross section over the entire image
            positive_projection = cv.CreateMat(uncropped_projection_height,
                                               uncropped_projection_width,
                                               cv.CV_8UC1)
            cv.Repeat(cross_section, positive_projection)

            #Predistort the projections to compensate for projector optics so that the scanlines are approximately planar
            predistorted_positive_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Remap(positive_projection,
                     predistorted_positive_projection,
                     predistortmap_x,
                     predistortmap_y,
                     flags=cv.CV_INTER_NN)

            #Create a negative of the pattern for thresholding
            predistorted_negative_projection = cv.CreateMat(
                self.projector_info.height, self.projector_info.width,
                cv.CV_8UC1)
            cv.Not(predistorted_positive_projection,
                   predistorted_negative_projection)

            #Fade the borders of the patterns to avoid artifacts at the edges of projection
            predistorted_positive_projection_faded = fade_edges(
                predistorted_positive_projection, 20)
            predistorted_negative_projection_faded = fade_edges(
                predistorted_negative_projection, 20)

            self.predistorted_positive_projections.append(
                predistorted_positive_projection_faded)
            self.predistorted_negative_projections.append(
                predistorted_negative_projection_faded)

        rospy.loginfo("Waiting for projection setting service...")
        rospy.wait_for_service('set_projection')
        rospy.loginfo("Projection setting service found.")
        self.set_projection = rospy.ServiceProxy('set_projection',
                                                 projector.srv.SetProjection)

        self.pixel_associations_msg = None
        self.point_cloud_msg = None

        rospy.Service("~get_point_cloud", graycode_scanner.srv.GetPointCloud,
                      self.handle_point_cloud_srv)

        point_cloud_pub = rospy.Publisher('~point_cloud',
                                          sensor_msgs.msg.PointCloud)

        rospy.loginfo("Ready.")

        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.point_cloud_msg is not None:
                point_cloud_pub.publish(self.point_cloud_msg)
            rate.sleep()

    def read_scanner_info(self):
        try:
            input_stream = open(self.scanner_info_file_name, 'r')
        except IOError:
            rospy.loginfo('Specified scanner info file at ' +
                          self.scanner_info_file_name + ' does not exist.')
            return
        info_dict = yaml.load(input_stream)
        try:
            self.projector_to_camera_translation_vector = tuple(
                info_dict['projector_to_camera_translation_vector'])
            self.projector_to_camera_rotation_vector = list_to_matrix(
                info_dict['projector_to_camera_rotation_vector'], 3, 1,
                cv.CV_32FC1)
        except (TypeError, KeyError):
            rospy.loginfo('Scanner info file at ' +
                          self.scanner_info_file_name +
                          ' is in an unrecognized format.')
            return
        rospy.loginfo('Scanner info successfully read from ' +
                      self.scanner_info_file_name)

    def update_image(self, imagemsg):
        with self.mutex:
            if not self.image_update_flag.is_set():
                self.latest_image = self.bridge.imgmsg_to_cv(imagemsg, "mono8")
                self.image_update_flag.set()

    def get_next_image(self):
        with self.mutex:
            self.image_update_flag.clear()
        self.image_update_flag.wait()
        with self.mutex:
            return self.latest_image

    def get_picture_of_projection(self, projection):
        image_message = self.bridge.cv_to_imgmsg(projection, encoding="mono8")
        self.set_projection(image_message)
        return self.get_next_image()

    def get_projector_line_associations(self):
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        for i in range(self.number_of_projection_patterns):
            positives.append(
                self.get_picture_of_projection(
                    self.predistorted_positive_projections[i]))
            negatives.append(
                self.get_picture_of_projection(
                    self.predistorted_negative_projections[i]))

        rospy.loginfo("Thresholding...")
        strike_sum = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        gray_codes = cv.CreateMat(self.camera_info.height,
                                  self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height,
                                               self.camera_info.width,
                                               cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height,
                                       self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask,
                    cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height,
                                   self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)

            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height,
                                    self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2**i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        projector_line_associations = cv.CreateMat(self.camera_info.height,
                                                   self.camera_info.width,
                                                   cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
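        # Standard Gray-to-binary decode: XOR the value with itself shifted right by
        # 16, 8, 4, 2 and 1 bits in turn (e.g. Gray 110b decodes to binary 100b = 4).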
        for i in range(
                cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)),
                -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(
                self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations,
                            projector_line_associations_bitshifted_right,
                            (2**-(2**i)), -0.5)
            cv.Xor(projector_line_associations,
                   projector_line_associations_bitshifted_right,
                   projector_line_associations)

        rospy.loginfo("Post processing...")

        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height,
                                      self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)

        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height,
                                             self.camera_info.width,
                                             cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines,
                    invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)

        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations

    def handle_point_cloud_srv(self, req):
        response = graycode_scanner.srv.GetPointCloudResponse()
        self.get_point_cloud()
        response.point_cloud = self.point_cloud_msg
        response.success = True
        return response

    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points,
                                             cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates,
                                                camera_model)

        intersection_points = ray_plane_intersections(camera_rays, planes)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
                print(scanline_associations[0, i])
                print(rectified_camera_coordinates[0, i])

            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg

    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z,
                        normal_vectors_z,
                        scale=1.0 / self.projector_model.fx())

        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None,
                 normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors,
                     self.projector_to_camera_rotation_matrix)

        normal_vectors_split = [None] * 3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0],
                 normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i],
                        self.projector_to_camera_translation_vector[i],
                        n_dot_p, n_dot_p)
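        # planes packs each scanline plane as (nx, ny, nz, n.t): the normal in camera
        # coordinates plus its dot product with the projector-to-camera translation.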

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1],
                 normal_vectors_split[2], n_dot_p, planes)

        return planes

    def display_scanline_associations(self, associations):
        display_image = cv.CreateMat(self.camera_info.height,
                                     self.camera_info.width, cv.CV_8UC1)
        cv.ConvertScale(associations, display_image,
                        255.0 / self.number_of_scanlines)
        cv.NamedWindow("associations", flags=0)
        cv.ShowImage("associations", display_image)
        cv.WaitKey(800)
Example No. 5
class FollowerDrive(DTROS):
    def __init__(self, node_name):
        # initialize the DTROS parent class
        super(FollowerDrive, self).__init__(node_name=node_name)

        self.listener = tf.TransformListener()
        #self.sub_follower_pose = rospy.Subscriber('', VehiclePose)
        self.sub_image = rospy.Subscriber(
            "/duckiesam/camera_node/image/compressed",
            CompressedImage,
            self.find_marker,
            buff_size=921600,
            queue_size=1)
        self.pub_image = rospy.Publisher('/duckiesam/modified_image',
                                         Image,
                                         queue_size=1)
        self.sub_info = rospy.Subscriber("/duckiesam/camera_node/camera_info",
                                         CameraInfo,
                                         self.get_camera_info,
                                         queue_size=1)
        self.pub_move = rospy.Publisher("/duckiesam/joy_mapper_node/car_cmd",
                                        Twist2DStamped,
                                        queue_size=1)
        self.pub_pose = rospy.Publisher(
            "/duckiesam/velocity_to_pose_node/pose",
            Pose2DStamped,
            queue_size=1)
        self.pub_detection = rospy.Publisher("detect",
                                             BoolStamped,
                                             queue_size=1)

        #values for detecting marker
        self.starting = 0
        self.ending = 0
        self.camerainfo = PinholeCameraModel()
        self.bridge = CvBridge()
        self.gotimage = False
        self.imagelast = None
        self.processedImg = None
        self.detected = False
        self.number = 0

        #values for calculating pose of robot
        self.originalmatrix()
        self.solP = False
        #self.rotationvector = None
        #self.translationvector = None
        self.axis = np.float32([[0.0125, 0, 0], [0, 0.0125, 0],
                                [0, 0, -0.0375]]).reshape(-1, 3)
        #self.distance = None
        #self.angle_f = None
        #self.angle_l = None
        self.count = 0
        self.add_distance = 0.0
        self.add_x = 0.0
        self.add_y = 0.0
        self.add_angle_l = 0.0
        self.needed = True
        #values for driving the robot
        self.initialvalues()

        rospy.on_shutdown(self.my_shutdown)

    def initialvalues(self):

        self.maxdistance = 0.25
        self.speedN = 0
        self.e_vB = 0
        self.rotationN = 0
        self.mindistance = 0.2
        self.d_e = 0  #distance error
        self.d_e_1 = 0
        self.d_e_2 = 0
        self.y2 = 0
        self.controltime = rospy.Time.now()
        self.Kp = 5
        self.Ki = 0
        self.Kd = 0
        self.I = 0
        self.r1 = 3
        self.r2 = 0
        self.r3 = 0

    #get camera info for pinhole camera model
    def get_camera_info(self, camera_msg):
        self.camerainfo.fromCameraInfo(camera_msg)

    #step 1 : find the back circle grids using cv2.findCirclesGrid
    ##### set (x,y) for points and flag for detection
    def find_marker(self, image_msg):
        try:
            self.imagelast = self.bridge.compressed_imgmsg_to_cv2(
                image_msg, "bgr8")
        except CvBridgeError as e:
            print(e)
        if self.gotimage == False:
            self.gotimage = True

        #time checking
        self.starting = rospy.Time.now()
        #from_last_image = (self.starting - self.ending).to_sec()

        gray = cv2.cvtColor(self.imagelast, cv2.COLOR_BGR2GRAY)

        detection, corners = cv2.findCirclesGrid(gray, (7, 3))

        processedImg = self.imagelast.copy()

        if detection:
            cv2.drawChessboardCorners(processedImg, (7, 3), corners, detection)
            self.detected = True

            twoone = []
            for i in range(0, 21):
                point = [corners[i][0][0], corners[i][0][1]]
                twoone.append(point)
            twoone = np.array(twoone)

            rotationvector, translationvector, processedImg = self.gradient(
                twoone, processedImg)
            self.detected = self.solP
            img_out = self.bridge.cv2_to_imgmsg(processedImg, "bgr8")
            self.pub_image.publish(img_out)
            distance, x, y, angle_l = self.find_distance(
                translationvector, rotationvector)

            self.ending = rospy.Time.now()
        else:
            self.detected = False
            img_out = self.bridge.cv2_to_imgmsg(self.imagelast, "bgr8")
            self.pub_image.publish(img_out)
            self.ending = rospy.Time.now()
            #cmd.v = 0
            #cmd.omega = 0
            #self.pub_move.publish(cmd)

    #step 2 : make the matrix for the 3D original shape
    def originalmatrix(self):
        #corners and points
        self.originalmtx = np.zeros([21, 3])
        for i in range(0, 7):
            for j in range(0, 3):
                self.originalmtx[i + j * 7, 0] = 0.0125 * i - 0.0125 * 3
                self.originalmtx[i + j * 7, 1] = 0.0125 * j - 0.0125

    #step 3 : use intrinsic matrix and solvePnP, return rvec and tvec, print axis
    def gradient(self, imgpts, processedImg):
        #use solvePnP to find the rotation and translation vectors, then project the 3D points onto the image plane
        self.solP, rotationvector, translationvector = cv2.solvePnP(
            self.originalmtx, imgpts, self.camerainfo.intrinsicMatrix(),
            self.camerainfo.distortionCoeffs())
        if self.solP:
            pointsin3D, jacoB = cv2.projectPoints(
                self.originalmtx, rotationvector, translationvector,
                self.camerainfo.intrinsicMatrix(),
                self.camerainfo.distortionCoeffs())
            pointaxis, _ = cv2.projectPoints(
                self.axis, rotationvector, translationvector,
                self.camerainfo.intrinsicMatrix(),
                self.camerainfo.distortionCoeffs())
            processedImg = cv2.line(processedImg, tuple(imgpts[10].ravel()),
                                    tuple(pointaxis[0].ravel()), (255, 0, 0),
                                    2)
            processedImg = cv2.line(processedImg, tuple(imgpts[10].ravel()),
                                    tuple(pointaxis[1].ravel()), (0, 255, 0),
                                    2)
            processedImg = cv2.line(processedImg, tuple(imgpts[10].ravel()),
                                    tuple(pointaxis[2].ravel()), (0, 0, 255),
                                    3)

        return rotationvector, translationvector, processedImg

    #step 4 : find the distance between the leader and the following robot, print out distance and time
    def find_distance(self, translationvector, rotationvector):
        #use tvec to calculate distance
        tvx = translationvector[0]
        tvy = translationvector[1]
        tvz = translationvector[2]

        distance = math.sqrt(tvx * tvx + tvz * tvz)
        angle_f = np.arctan2(tvx[0], tvz[0])

        R, _ = cv2.Rodrigues(rotationvector)
        R_inverse = np.transpose(R)
        angle_l = np.arctan2(
            -R_inverse[2, 0],
            math.sqrt(R_inverse[2, 1]**2 + R_inverse[2, 2]**2))

        T = np.array([-np.sin(angle_l), np.cos(angle_l)])

        #tvecW is position of camera(follower vehicle) in world frame
        tvecW = -np.dot(R_inverse, translationvector)

        #desired point [0.20, 0] in (x, y); here tvz -> x and tvx -> y
        x = tvecW[2][0]
        y = tvecW[0][0]

        #y2 = tvecW[0][0] - 0.05*np.sin(angle_l)

        textdistance = "Distance = %s, Angle of Follower = %s, Angle of Leader = %s" % (
            distance, angle_f, angle_l)
        rospy.loginfo("%s" % textdistance)

        if self.count < 10 and self.needed:
            self.add_distance = self.add_distance + distance
            self.add_x = self.add_x + x
            self.add_y = self.add_y + y
            self.add_angle_l = self.add_angle_l + angle_l
            self.count += 1

        return distance, x, y, angle_l

    def initial_pose(self):
        distance = self.add_distance / self.count
        x = self.add_x / self.count
        y = self.add_y / self.count
        angle_l = self.add_angle_l / self.count
        self.count = 0
        pose_init_msg = Pose2DStamped()
        pose_init_msg.x = x
        pose_init_msg.y = y
        pose_init_msg.theta = angle_l
        self.pub_pose.publish(pose_init_msg)
        self.needed = False
        ready = BoolStamped()
        ready.header.stamp = rospy.Time.now()
        ready.data = True
        self.pub_detection.publish(ready)

    def move(self):
        #from the bag move drive
        rate = rospy.Rate(10.0)
        self.listener.waitForTransform("/duckiesam", "/needtogo", rospy.Time(),
                                       rospy.Duration(4.0))
        while not rospy.is_shutdown():
            try:
                now = rospy.Time.now()
                self.listener.waitForTransform("/duckiesam", "/needtogo", now,
                                               rospy.Duration(4.0))
                (trans,
                 rot) = self.listener.lookupTransform('/duckiesam',
                                                      '/needtogo',
                                                      rospy.Time(0))
            except (tf.LookupException, tf.ConnectivityException,
                    tf.ExtrapolationException):
                continue

            angular = 4 * math.atan2(trans[1], trans[0])
            linear = 0.5 * math.sqrt(trans[0]**2 + trans[1]**2)
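            # Simple proportional law: steer towards the looked-up target transform
            # (omega from the bearing to the target, v from the remaining distance).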
            cmd = Twist2DStamped()
            self.number += 1
            cmd.header.seq = self.number
            cmd.v = linear
            cmd.omega = angular
            self.pub_move.publish(cmd)

            rate.sleep()
Example No. 6
from image_geometry import PinholeCameraModel
from sensor_msgs.msg import CameraInfo

# Build the camera model from a hand-filled CameraInfo message
cam_info = CameraInfo()
model = PinholeCameraModel()

cam_info.D = [
    0.1639958233797625, -0.271840030972792, 0.001055841660100477,
    -0.00166555973740089, 0.0
]
cam_info.K = [
    322.0704122808738, 0.0, 199.2680620421962, 0.0, 320.8673986158544,
    155.2533082600705, 0.0, 0.0, 1.0
]
cam_info.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
cam_info.P = [
    329.2483825683594, 0.0, 198.4101510452074, 0.0, 0.0, 329.1044006347656,
    155.5057121208347, 0.0, 0.0, 0.0, 1.0, 0.0
]

model.fromCameraInfo(cam_info)
print(model.distortionCoeffs())
print(model.intrinsicMatrix())
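# intrinsicMatrix() returns K as a 3x3 matrix and distortionCoeffs() returns the
# D vector, both rebuilt from the CameraInfo fields assigned above.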

print('a')


def PointFromPixel(pixel, camera_model, depth=1000):
    # Back-project the pixel to a ray, normalize so z == 1, then scale to the given depth
    ray = camera_model.projectPixelTo3dRay(pixel)
    ray_z = [el / ray[2] for el in ray]
    pt = [el * depth for el in ray_z]
    return pt


pixel1 = (11, 34)
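# Hypothetical continuation of the snippet: back-project the pixel at the default
# depth of 1000 (same units as the calibration) using the model built above.
point1 = PointFromPixel(pixel1, model)
print(point1)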
class camera_lidar_calib(object):
    def __init__(self):
        self._cv_bridge = CvBridge()
        self._laser_projector = LaserProjection()

        # # Camera rectification. To be improved: read the calibration from a .yaml file.
        # The hard-coded calibration of the camera would go here:
        # self.DIM    = (1920, 1208)
        # self.K      = np.array([[693.506921, 0.000000, 955.729444], [0.000000, 694.129349, 641.732500], [0.0, 0.0, 1.0]])
        # self.D      = np.array([[-0.022636], [ -0.033855], [0.013493], [-0.001831]])
        # self.map1, self.map2    = cv2.fisheye.initUndistortRectifyMap(self.K, self.D, np.eye(3), self.K, self.DIM, cv2.CV_16SC2) # np.eye(3) here is actually the rotation matrix

        #   OR load it from a yaml file
        self.cameraModel = PinholeCameraModel()

        # See https://github.com/ros-perception/camera_info_manager_py/tree/master/tests
        camera_infomanager = CameraInfoManager(
            cname='truefisheye',
            url='package://ros_camera_lidar_calib/cfg/truefisheye800x503.yaml'
        )  # Select the calibration file
        camera_infomanager.loadCameraInfo()
        self.cameraInfo = camera_infomanager.getCameraInfo()
        # Create a camera model from the camera info
        self.cameraModel.fromCameraInfo(self.cameraInfo)  # Create camera model
        self.DIM = (self.cameraInfo.width, self.cameraInfo.height)
        # Get rectification maps
        self.map1, self.map2 = cv2.fisheye.initUndistortRectifyMap(
            self.cameraModel.intrinsicMatrix(),
            self.cameraModel.distortionCoeffs(), np.eye(3),
            self.cameraModel.intrinsicMatrix(),
            (self.cameraInfo.width, self.cameraInfo.height),
            cv2.CV_16SC2)  # np.eye(3) here is actually the rotation matrix

        # # Declare subscribers to get the latest data
        cam0_subs_topic = '/gmsl_camera/port_0/cam_0/image_raw'
        cam1_subs_topic = '/gmsl_camera/port_0/cam_1/image_raw'
        cam2_subs_topic = '/gmsl_camera/port_0/cam_2/image_raw'
        #cam3_subs_topic = '/gmsl_camera/port_0/cam_3/image_raw/compressed'
        lidar_subs_topic = '/Sensor/points'

        #self.cam0_img_sub   = rospy.Subscriber( cam0_subs_topic , Image, self.cam0_img_callback, queue_size=1)
        #self.cam1_img_sub   = rospy.Subscriber( cam1_subs_topic , Image, self.cam1_img_callback, queue_size=1)
        self.cam2_img_sub = rospy.Subscriber(cam2_subs_topic,
                                             Image,
                                             self.cam2_img_callback,
                                             queue_size=1)
        #self.cam0_img_sub   = rospy.Subscriber( cam0_subs_topic , CompressedImage, self.cam0_img_compre_callback, queue_size=1)
        #self.cam1_img_sub   = rospy.Subscriber( cam1_subs_topic , CompressedImage, self.cam1_img_compre_callback, queue_size=1)
        #self.cam2_img_sub   = rospy.Subscriber( cam2_subs_topic , CompressedImage, self.cam2_img_compre_callback, queue_size=1)
        #self.cam3_img_sub  = rospy.Subscriber( cam3_subs_topic , CompressedImage, self.cam3_img_compre_callback, queue_size=1)
        self.lidar_sub = rospy.Subscriber(lidar_subs_topic,
                                          PointCloud2,
                                          self.lidar_callback,
                                          queue_size=1)
        # Get the tfs
        self.tf_listener = tf.TransformListener()
        #self.lidar_time = rospy.Subscriber(lidar_subs_topic , PointCloud2, self.readtimestamp)
        #self.img0_time = rospy.Subscriber(cam0_subs_topic , CompressedImage, self.readtimestamp)

        # # Declare the global variables we will use to get the latest info
        self.cam0_image_np = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam0_undistorted_img = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam1_image_np = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam1_undistorted_img = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam2_image_np = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam2_undistorted_img = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam3_image_np = np.empty((self.DIM[1], self.DIM[0], 3))
        self.cam3_undistorted_img = np.empty((self.DIM[1], self.DIM[0], 3))
        self.pcl_cloud = np.empty(
            (500, 4)
        )  # The width of a normal scan is not known in advance and may vary between scans

        self.now = rospy.Time.now()

        # # Main loop: Data projections and alignment in real time
        self.lidar_angle_range_interest = [
            0, 180
        ]  # From -180 to 180, front is 0 deg. Range of angles to keep points from, depending on the camera, etc.
        thread.start_new_thread(self.projection_calibrate, ())

    def projection_calibrate(self):
        # # Main loop: Data projections and alignment in real time
        rate = rospy.Rate(30)  # ?
        rot_trans_matrix = np.empty(0)
        while not rospy.is_shutdown():
            # Get the tfs
            if not rot_trans_matrix.shape[0]:  # Look up the tf only while it is still empty
                try:
                    (trans, rot) = self.tf_listener.lookupTransform(
                        "/port0_cam3", "/Sensor", rospy.Time(
                            0))  # get different protections here, as needed
                    rot_trans_matrix = self.tf_listener.fromTranslationRotation(
                        trans, rot)
                    # print(trans)
                except (tf.LookupException, tf.ConnectivityException,
                        tf.ExtrapolationException):
                    continue

            ## Projections: see https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
            # # project onto camera frame:
            cloud_oncam = np.dot(rot_trans_matrix[:3, :],
                                 self.pcl_cloud[:, :4].T)
            # Filter points that won't be on the image: remove points behind the camera plane
            #cloud_oncam = cloud_oncam[:,cloud_oncam[2,:]>0.01]
            cloud_oncam_2d = cloud_oncam / cloud_oncam[
                2, :]  # project 3D to 2D plane

            # # Project onto image pixels
            #cloud_pixels =  np.dot( self.K , cloud_oncam_2d ).astype(int) # [u,v].T, u->x-.Horizontal, v->y->vertical
            cloud_pixels = np.dot(np.array(
                self.cameraModel.intrinsicMatrix()), cloud_oncam_2d).astype(
                    int)  # [u,v,1].T, u->x-.Horizontal, v->y->vertical
            cloud_pixels[2, :] = cloud_oncam[
                2, :]  #Append on the last dim the real depth. Not in camera plane dimensions. DEPTH: [u,v,depth in m].T

            # filter pixels out of img
            padding = 2
            cloud_pixels = cloud_pixels[:, (cloud_pixels[0, :] > padding) & (
                cloud_pixels[0, :] <
                (self.DIM[0] - padding)) & (cloud_pixels[1, :] > padding) & (
                    cloud_pixels[1, :] <
                    (self.DIM[1] -
                     padding))]  # Filter those points outside the image

            image = self.cam2_undistorted_img  # improve loop performance by putting it in a variable

            for idx in range(len(cloud_pixels[2])):
                image[cloud_pixels[1, idx], cloud_pixels[0, idx], :] = [
                    0, 0, np.clip(cloud_pixels[2, idx] * 20, 0, 255)
                ]

            self.cam2_undistorted_img = image

            # Show imgs
            cv2.imshow("undistorted2", self.cam2_undistorted_img)
            #cv2.imshow("distorted0",self.cam0_image_np)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # Loop!
            rate.sleep()

        cv2.destroyAllWindows()

    def readtimestamp(self, data):
        print(data.header.stamp)
        #print(data.header.frame_id,data.header.stamp,data.header.seq)

    def cam0_img_callback(self, image_msg):
        # Get image as np
        # self.cam0_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
        self.cam0_image_np = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")

        self.cam0_undistorted_img = cv2.remap(self.cam0_image_np,
                                              self.map1,
                                              self.map2,
                                              interpolation=cv2.INTER_LANCZOS4,
                                              borderMode=cv2.BORDER_CONSTANT)

    def cam1_img_callback(self, image_msg):
        # Get image as np
        # self.cam0_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
        self.cam1_image_np = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")

        self.cam1_undistorted_img = cv2.remap(self.cam1_image_np,
                                              self.map1,
                                              self.map2,
                                              interpolation=cv2.INTER_LANCZOS4,
                                              borderMode=cv2.BORDER_CONSTANT)

    def cam2_img_callback(self, image_msg):
        # Get image as np
        # self.cam0_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
        self.cam2_image_np = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")

        self.cam2_undistorted_img = cv2.remap(self.cam2_image_np,
                                              self.map1,
                                              self.map2,
                                              interpolation=cv2.INTER_LANCZOS4,
                                              borderMode=cv2.BORDER_CONSTANT)

    # def cam0_img_compre_callback(self,image_msg):
    #     # Get image as np
    #     self.cam0_image_np = self._cv_bridge.compre_imgmsg_to_cv2(image_msg)
    #     #self.cam0_image_np = cv2.imdecode( np.fromstring(image_msg.data, np.uint8) , cv2.IMREAD_COLOR) # OpenCV >= 3.0:
    #
    #     self.cam0_undistorted_img = cv2.remap(self.cam0_image_np, self.map1,  self.map2, interpolation=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
    #
    # def cam1_img_compre_callback(self,image_msg):
    #     # Get image as np
    #     self.cam1_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
    #     self.cam1_undistorted_img = cv2.remap(self.cam0_image_np,  self.map1,  self.map2, interpolation=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
    #
    # def cam2_img_compre_callback(self,image_msg):
    #     # Get image as np
    #     self.cam2_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
    #     self.cam2_undistorted_img = cv2.remap(self.cam0_image_np, self.map1,  self.map2, interpolation=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)
    #
    # def cam3_img_compre_callback(self,image_msg):
    #     # Get image as np
    #     self.cam3_image_np = self._cv_bridge.compressed_imgmsg_to_cv2(image_msg)
    #     self.cam3_undistorted_img = cv2.remap(self.cam0_image_np,  self.map1,  self.map2, interpolation=cv2.INTER_LANCZOS4, borderMode=cv2.BORDER_CONSTANT)

    def lidar_callback(self, lidar_scan):
        """ Converts a ROS PointCloud2 message to a pcl PointXYZRGB
            Args:
                lidar_scan (PointCloud2): ROS PointCloud2 message
            Returns:
                np pointcloud: numpy XYZ1I point cloud
        """
        # points_list = []
        # for data in pc2.read_points(lidar_scan, skip_nans=True):
        # points_list.append([data[0], data[1], data[2],1.0])
        # self.pcl_cloud = np.array(points_list)

        # 2nd method
        #self.pcl_cloud =  np.array( list(pc2.read_points(lidar_scan, skip_nans=True)) )

        # 3rd method
        nparr = pcl2_2_np.msg_to_arr(lidar_scan)
        width = nparr.shape[1]
        m = width / 360
        self.pcl_cloud = self.lidar_map_flatten(
            nparr[:,
                  int(width / 2.0 + self.lidar_angle_range_interest[0] *
                      m):int(width / 2.0 + self.lidar_angle_range_interest[1] *
                             m)])  # we cloud filter here by angle!

        # Debugging tools
        # print "  Got cloud @ ", 1.0/float(rospy.Time.now().to_sec() - self.now.to_sec()) ," Hz "
        # self.now = rospy.Time.now()

    def lidar_map_flatten(self, nparr):
        """ Converts a numpy array of (height, width) [x,y,z,i,_] into a flatten np array of [xyz1,numer_of_points]
            Args:
                nparr (numpy): numpy array of (8, ~5440) [x,y,z,i,_]loud @ ", 1.0/float(rospy.Time.now().to_sec() - self.now.to_sec()) ," Hz "
        self.now = ros
            Returns:
                np pointcloud: numpy [xyz1,numer_of_points]  point cloud
        """
        pcl_cloud = np.ones((nparr.shape[0] * nparr.shape[1], 4))
        pcl_cloud[:, 0] = np.reshape(nparr['x'], -1)
        pcl_cloud[:, 1] = np.reshape(nparr['y'], -1)
        pcl_cloud[:, 2] = np.reshape(nparr['z'], -1)

        return pcl_cloud

    def spin(self):
        rospy.spin()

        point_cloud_pub = rospy.Publisher('~point_cloud', sensor_msgs.msg.PointCloud)

        rospy.loginfo("Ready.")

        rate = rospy.Rate(1)
        while not rospy.is_shutdown():
            if self.point_cloud_msg is not None:
                point_cloud_pub.publish(self.point_cloud_msg)
            rate.sleep()

    def read_scanner_info(self):
        try:
            input_stream = file(self.scanner_info_file_name, 'r')
        except IOError:
            rospy.loginfo('Specified scanner info file at ' + self.scanner_info_file_name + ' does not exist.')
            return
        info_dict = yaml.load(input_stream)
        try:
            self.projector_to_camera_translation_vector = tuple(info_dict['projector_to_camera_translation_vector'])
            self.projector_to_camera_rotation_vector = list_to_matrix(info_dict['projector_to_camera_rotation_vector'], 3, 1, cv.CV_32FC1)
        except (TypeError, KeyError):
            rospy.loginfo('Scanner info file at ' + self.scanner_info_file_name + ' is in an unrecognized format.')
            return
        rospy.loginfo('Scanner info successfully read from ' + self.scanner_info_file_name)

    def update_image(self, imagemsg):
        with self.mutex:
            if not self.image_update_flag.is_set():
                self.latest_image = self.bridge.imgmsg_to_cv(imagemsg, "mono8")
                self.image_update_flag.set()
    
    def get_next_image(self):
        with self.mutex:
            self.image_update_flag.clear()
        self.image_update_flag.wait()
        with self.mutex:
            return self.latest_image
                            
    def get_picture_of_projection(self, projection):
        image_message = self.bridge.cv_to_imgmsg(projection, encoding="mono8")
        self.set_projection(image_message)
        return self.get_next_image()
    
    def get_projector_line_associations(self):
        rospy.loginfo("Scanning...")
        positives = []
        negatives = []
        for i in range(self.number_of_projection_patterns):
            positives.append(self.get_picture_of_projection(self.predistorted_positive_projections[i]))
            negatives.append(self.get_picture_of_projection(self.predistorted_negative_projections[i]))
        
            
        rospy.loginfo("Thresholding...")
        strike_sum = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(strike_sum)
        gray_codes = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.SetZero(gray_codes)
        for i in range(self.number_of_projection_patterns):
            difference = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.Sub(positives[i], negatives[i], difference)

            absolute_difference = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.AbsDiff(positives[i], negatives[i], absolute_difference)

            #Mark all the pixels that were "too close to call" and add them to the running total
            strike_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(absolute_difference, self.threshold, strike_mask, cv.CV_CMP_LT)
            strikes = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            cv.Set(strikes, 1, strike_mask)
            cv.Add(strikes, strike_sum, strike_sum)
            
            #Set the corresponding bit in the gray_code
            bit_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
            cv.CmpS(difference, 0, bit_mask, cv.CV_CMP_GT)
            bit_values = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            cv.Set(bit_values, 2 ** i, bit_mask)
            cv.Or(bit_values, gray_codes, gray_codes)

        rospy.loginfo("Decoding...")
        # Decode every gray code into binary
        projector_line_associations = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
        cv.Copy(gray_codes, projector_line_associations)
        for i in range(cv.CV_MAT_DEPTH(cv.GetElemType(projector_line_associations)), -1, -1):
            projector_line_associations_bitshifted_right = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_32SC1)
            #Using ConvertScale with a shift of -0.5 to do integer division for bitshifting right
            cv.ConvertScale(projector_line_associations, projector_line_associations_bitshifted_right, (2 ** -(2 ** i)), -0.5)
            cv.Xor(projector_line_associations, projector_line_associations_bitshifted_right, projector_line_associations)
        
        rospy.loginfo("Post processing...")
        
        # Remove all pixels that were "too close to call" more than once
        strikeout_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(strike_sum, 1, strikeout_mask, cv.CV_CMP_GT)
        cv.Set(projector_line_associations, -1, strikeout_mask)
        
        # Remove all pixels that don't decode to a valid scanline number
        invalid_scanline_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.InRangeS(projector_line_associations, 0, self.number_of_scanlines, invalid_scanline_mask)
        cv.Not(invalid_scanline_mask, invalid_scanline_mask)
        cv.Set(projector_line_associations, -1, invalid_scanline_mask)
        
        self.display_scanline_associations(projector_line_associations)

        return projector_line_associations
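
    # Reference sketch (added for clarity; not part of the original scanner): the decode
    # loop above is the standard parallel-prefix gray-to-binary conversion, XOR-ing the
    # code with itself shifted right by successively halved amounts (16, 8, 4, 2, 1 for
    # the 32-bit codes used here). The generator side (graycodemath.generate_gray_code)
    # presumably produces the usual n ^ (n >> 1) encoding.
    @staticmethod
    def _gray_to_binary_reference(gray, bits=32):
        shift = bits // 2
        while shift:
            gray ^= gray >> shift
            shift //= 2
        return gray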

    def handle_point_cloud_srv(self, req):
        response = graycode_scanner.srv.GetPointCloudResponse()
        self.get_point_cloud()
        response.point_cloud = self.point_cloud_msg
        response.success = True
        return response

    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection, encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)
        
        valid_points_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)
        
        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0;
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1
       
        cv.UndistortPoints(rectified_camera_coordinates, rectified_camera_coordinates, camera_model.intrinsicMatrix(), camera_model.distortionCoeffs())

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates, camera_model)
        
        intersection_points = ray_plane_intersections(camera_rays, planes)
        
        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
                print scanline_associations[0, i]
                print rectified_camera_coordinates[0, i]
                
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)
     
        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg

    def scanline_numbers_to_planes(self, scanline_numbers):
        rows = scanline_numbers.height
        cols = scanline_numbers.width
        normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_x, -1)
        normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Set(normal_vectors_y, 0)
        normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Copy(scanline_numbers, normal_vectors_z)

        cv.ConvertScale(normal_vectors_z, normal_vectors_z, scale=self.pixels_per_scanline)
        cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
        cv.ConvertScale(normal_vectors_z, normal_vectors_z, scale=1.0/self.projector_model.fx())
        
        normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
        cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None, normal_vectors)

        # Bring the normal vectors into camera coordinates
        cv.Transform(normal_vectors, normal_vectors, self.projector_to_camera_rotation_matrix)
        
        normal_vectors_split = [None]*3
        for i in range(3):
            normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.Split(normal_vectors, normal_vectors_split[0], normal_vectors_split[1], normal_vectors_split[2], None)

        n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
        cv.SetZero(n_dot_p)
        for i in range(3):
            cv.ScaleAdd(normal_vectors_split[i], self.projector_to_camera_translation_vector[i], n_dot_p, n_dot_p)

        planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
        cv.Merge(normal_vectors_split[0], normal_vectors_split[1], normal_vectors_split[2], n_dot_p, planes)
        

        return planes

    def display_scanline_associations(self, associations):
        display_image = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.ConvertScale(associations, display_image, 255.0/self.number_of_scanlines)
        cv.NamedWindow("associations", flags=0)
        cv.ShowImage("associations", display_image)
        cv.WaitKey(800)
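
# Sketch (added for clarity, not from the original code) of the geometry behind
# ray_plane_intersections() used in get_point_cloud(): each valid camera pixel gives a
# ray from the camera origin with direction r, each scanline gives a plane n . x = d in
# camera coordinates (scanline_numbers_to_planes packs [n, d] into a four-channel
# matrix), and the reconstructed point is the ray/plane intersection.
import numpy as np

def intersect_ray_with_plane(ray_dir, plane):
    n, d = np.asarray(plane[:3], dtype=float), float(plane[3])
    r = np.asarray(ray_dir, dtype=float)
    denom = np.dot(n, r)
    if abs(denom) < 1e-9:          # ray (nearly) parallel to the plane
        return None
    return r * (d / denom)

# Example: a ray along the optical axis meeting the plane z = 0.5 metres:
# intersect_ray_with_plane([0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.5]) -> [0., 0., 0.5]
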
    parser.add_argument('--out', default="")
    parser.add_argument('--cam_info_topic', default="")
    parser.add_argument('--image_topic', default="")

    args = parser.parse_args()

    bag_path = os.path.join(os.getcwd(), args.bag)

    bag = Bag2CameraInfo(bag_path, args.cam_info_topic)
    bag_im = Bag2Images(bag_path, args.image_topic)

    c = PinholeCameraModel()
    c.fromCameraInfo(bag.msg)
    size = c.fullResolution()
    K_new, _ = cv2.getOptimalNewCameraMatrix(c.fullIntrinsicMatrix(),
                                             c.distortionCoeffs(), size, 0.0)

    pts = []
    for x in range(size[0]):
        for y in range(size[1]):
            pts.append([[x, y]])

    new_points = cv2.undistortPoints(np.array(pts, dtype=np.float32),
                                     bag.K,
                                     bag.D,
                                     P=K_new)
    map_x = np.reshape(new_points[:, 0, 0],
                       newshape=(size[0], size[1])).transpose()
    map_y = np.reshape(new_points[:, 0, 1],
                       newshape=(size[0], size[1])).transpose()
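
    # Usage sketch (added for illustration): the per-pixel maps built above can be
    # applied with cv2.remap. The frame below is a stand-in; in the actual script the
    # frames would come from the bag (the attribute holding them is not shown here).
    frame = np.zeros((size[1], size[0], 3), dtype=np.uint8)
    warped = cv2.remap(frame, map_x.astype(np.float32), map_y.astype(np.float32),
                       interpolation=cv2.INTER_LINEAR)
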
Esempio n. 10
0
class VehicleFilterNode(object):
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()

        self.distance_between_centers = self.setupParam(
            'distance_between_centers', 0.0125)
        self.max_reproj_pixelerror_pose_estimation = self.setupParam(
            'max_reproj_pixelerror_pose_estimation', 5)

        self.sub_corners = rospy.Subscriber("~corners",
                                            VehicleCorners,
                                            self.processCorners,
                                            queue_size=1)

        self.pub_pose = rospy.Publisher("~pose", VehiclePose, queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.processCameraInfo,
                                         queue_size=1)
        self.pub_time_elapsed = rospy.Publisher("~pose_calculation_time",
                                                Float32,
                                                queue_size=1)
        self.pcm = PinholeCameraModel()
        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
        self.lock = mutex()

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def processCameraInfo(self, camera_info_msg):
        if self.lock.testandset():
            self.pcm.fromCameraInfo(camera_info_msg)
            self.lock.unlock()

    def processCorners(self, vehicle_corners_msg):
        # estimate the relative pose from the detected back-circle pattern
        rospy.loginfo("HMMMM")
        if self.lock.testandset():
            start = rospy.Time.now()
            # print(start)
            self.calcCirclePattern(vehicle_corners_msg.H,
                                   vehicle_corners_msg.W)
            points = []
            for Point32 in vehicle_corners_msg.corners:
                point = [Point32.x, Point32.y]
                points.append(point)
            points = np.array(points)
            # points = np.reshape(points, (2,-1))
            # print(points)
            # print(self.pcm.intrinsicMatrix())
            size = [480, 640]
            focal_length = size[1]
            center = (size[1] / 2, size[0] / 2)
            camera_matrix = np.array([[focal_length, 0, center[0]],
                                      [0, focal_length, center[1]], [0, 0, 1]],
                                     dtype="double")
            print(self.pcm.distortionCoeffs())
            # print(self.circlepattern)
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(self.circlepattern, points,
                                                camera_matrix,
                                                self.pcm.distortionCoeffs())
            rospy.loginfo("WATWAT")
            print("Rot: {}".format(rotation_vector))
            print("Trans: {}".format(translation_vector))
            if success:
                points_reproj, _ = cv2.projectPoints(
                    self.circlepattern, rotation_vector, translation_vector,
                    camera_matrix, self.pcm.distortionCoeffs())
                error = 0
                for i in range(0, len(points_reproj)):
                    error += cv2.norm(points[i], points_reproj[i, 0],
                                      cv2.NORM_L2)

                mean_reproj_error = error / len(points_reproj)
                print(mean_reproj_error)
                print(self.max_reproj_pixelerror_pose_estimation)

                if mean_reproj_error < self.max_reproj_pixelerror_pose_estimation:
                    # print(translation_vector)
                    (R, jac) = cv2.Rodrigues(rotation_vector)
                    R_inv = np.transpose(R)
                    translation_vector = -np.dot(R_inv, translation_vector)
                    pose_msg_out = VehiclePose()
                    pose_msg_out.header.stamp = rospy.Time.now()
                    pose_msg_out.rho.data = sqrt(translation_vector[2]**2 +
                                                 translation_vector[0]**2)
                    pose_msg_out.psi.data = np.arctan2(
                        -R_inv[2, 0], sqrt(R_inv[2, 1]**2 + R_inv[2, 2]**2))
                    pose_msg_out.detection.data = vehicle_corners_msg.detection.data
                    R2 = np.array([[
                        cos(pose_msg_out.psi.data), -sin(pose_msg_out.psi.data)
                    ], [
                        sin(pose_msg_out.psi.data),
                        cos(pose_msg_out.psi.data)
                    ]])
                    translation_vector = - \
                        np.array([translation_vector[2],
                                    translation_vector[0]])
                    translation_vector = np.dot(np.transpose(R2),
                                                translation_vector)
                    pose_msg_out.theta.data = np.arctan2(
                        translation_vector[1], translation_vector[0])
                    self.pub_pose.publish(pose_msg_out)
                    rospy.loginfo("HEYO")
                else:
                    rospy.loginfo(
                        "[%s] Pose estimation failed, too high reprojection error."
                        % (self.node_name))
            else:
                rospy.loginfo("[%s] Pose estimation failed." %
                              (self.node_name))

            elapsed_time = (rospy.Time.now() - start).to_sec()
            self.pub_time_elapsed.publish(elapsed_time)
        self.lock.unlock()
        return

    def calcCirclePattern(self, height, width):
        self.circlepattern_dist = self.distance_between_centers
        self.circlepattern = np.zeros([height * width, 3])
        for i in range(0, width):
            for j in range(0, height):
                self.circlepattern[width - 1 - i + j * width, 0] = self.circlepattern_dist * \
                    i - self.circlepattern_dist * (width - 1) / 2
                self.circlepattern[width - 1 - i + j * width, 1] = self.circlepattern_dist * \
                    j - self.circlepattern_dist * (height - 1) / 2
        print(self.circlepattern)
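
# Standalone sketch (added for clarity) of the object-point grid that calcCirclePattern()
# above builds: a height x width grid of circle centres in the pattern plane (z = 0),
# centred on the pattern, with d metres between neighbouring centres. This vectorised
# version uses the plain row-by-row ordering (index = i + j * width); the node above
# additionally mirrors the column index.
import numpy as np

def circle_pattern_points(height, width, d):
    rows, cols = np.meshgrid(np.arange(height), np.arange(width), indexing='ij')
    pts = np.zeros((height * width, 3))
    pts[:, 0] = (cols.reshape(-1) - (width - 1) / 2.0) * d
    pts[:, 1] = (rows.reshape(-1) - (height - 1) / 2.0) * d
    return pts

# e.g. the 3 x 7 back pattern with 0.0125 m spacing:
# circle_pattern_points(3, 7, 0.0125)
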
class LaneControllerNode(DTROS):
    """Computes control action.
    The node computes the commands in the form of linear and angular velocities.
    The configuration parameters can be changed dynamically while the node is running via ``rosparam set`` commands.
    Args:
        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use
    Configuration:

    Publisher:
        ~car_cmd (:obj:`Twist2DStamped`): The computed control action
    Subscribers:
        ~lane_pose (:obj:`LanePose`): The lane pose estimate from the lane filter
    """
    def __init__(self, node_name):
        # Initialize the DTROS parent class
        super(LaneControllerNode, self).__init__(node_name=node_name,
                                                 node_type=NodeType.CONTROL)

        # Add the node parameters to the parameters dictionary
        self.params = dict()
        self.pp_controller = PurePursuitLaneController(self.params)

        # Construct publishers
        self.pub_car_cmd = rospy.Publisher("~car_cmd",
                                           Twist2DStamped,
                                           queue_size=1,
                                           dt_topic_type=TopicType.CONTROL)

        # Construct subscribers
        # self.sub_lane_reading = rospy.Subscriber("~lane_pose",
        #                                          LanePose,
        #                                          self.cbLanePoses,
        #                                          queue_size=1)

        # self.sub_image = rospy.Subscriber(f"/{os.environ['VEHICLE_NAME']}/camera_node/image/compressed", CompressedImage, self.cb_image, queue_size=1)

        self.sub_image = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/camera_node/image/compressed",
            CompressedImage,
            self.cb_image,
            queue_size=1)

        self.sub_info = rospy.Subscriber(
            f"/{os.environ['VEHICLE_NAME']}/camera_node/camera_info",
            CameraInfo,
            self.cb_process_camera_info,
            queue_size=1)

        self.log("Initialized!")

        self.last_v = 0.2
        self.last_w = 0
        self.bridge = CvBridge()

        self.last_stamp = rospy.Time.now()

        self.pcm = PinholeCameraModel()
        self.pose_estimator = PoseEstimator(
            min_area=CIRCLE_MIN_AREA,
            min_dist_between_blobs=CIRCLE_MIN_DISTANCE,
            height=CIRCLE_PATTERN_HEIGHT,
            width=CIRCLE_PATTERN_WIDTH,
            target_distance=TARGET_DIST,
            camera_mode=CAMERA_MODE,
        )
        self.trajectory = Trajectory()

    def cb_process_camera_info(self, msg):
        """
        Callback that stores the intrinsic calibration into a PinholeCameraModel object.

        Args:

            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.
        """

        self.pcm.fromCameraInfo(msg)
        self.pose_estimator.initialize_camera_matrix(
            self.pcm.intrinsicMatrix(), self.pcm.distortionCoeffs())

    def cb_image(self, image_msg):
        """Callback receiving pose messages

        Args:
            input_pose_msg (:obj:`LanePose`): Message containing information about the current lane pose.
        """

        now = rospy.Time.now()

        dt = (now - self.last_stamp).to_sec()

        self.last_stamp = now

        if self.pose_estimator.initialized:
            image_cv = self.bridge.compressed_imgmsg_to_cv2(image_msg, "bgr8")

            target_detected, estimated_pose = self.pose_estimator.get_pose(
                image_cv)
            # print(estimated_pose)

            if target_detected:
                self.trajectory.update(estimated_pose)

            elif self.trajectory.is_initialized():
                self.trajectory.predict(dt)

            if self.trajectory.is_initialized():
                v, w = self.trajectory.get_commands()
                car_control_msg = Twist2DStamped()
                car_control_msg.header = image_msg.header
                car_control_msg.v = v
                car_control_msg.omega = w

                self.publishCmd(car_control_msg)

    # def cbLanePoses(self, input_pose_msg):
    #     """Callback receiving pose messages
    #
    #     Args:
    #         input_pose_msg (:obj:`LanePose`): Message containing information about the current lane pose.
    #     """
    #     self.pose_msg = input_pose_msg
    #
    #     car_control_msg = Twist2DStamped()
    #     car_control_msg.header = self.pose_msg.header
    #
    #     # TODO This needs to get changed
    #
    #     v, w = self.pp_controller.pure_pursuit(self.pose_msg.d, self.pose_msg.phi, self.last_v, self.last_w,
    #                                            self.vehicle_detection_msg)
    #
    #     self.last_v = v
    #     self.last_w = w
    #
    #     car_control_msg.v = v
    #     car_control_msg.omega = w
    #
    #     self.publishCmd(car_control_msg)

    def publishCmd(self, car_cmd_msg):
        """Publishes a car command message.

        Args:
            car_cmd_msg (:obj:`Twist2DStamped`): Message containing the requested control action.
        """
        self.pub_car_cmd.publish(car_cmd_msg)

    def cbParametersChanged(self):
        """Updates parameters in the controller object."""

        self.controller.update_parameters(self.params)
Esempio n. 12
0
    def get_point_cloud(self):

        # Scan the scene
        pixel_associations = self.get_pixel_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height,
                                               self.projector_info.width,
                                               cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(
            illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height,
                                      self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection,
                                                     encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)

        projector_model = PinholeCameraModel()
        projector_model.fromCameraInfo(self.projector_info)

        projector_to_camera_rotation_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
        cv.Rodrigues2(self.projector_to_camera_rotation_vector,
                      projector_to_camera_rotation_matrix)

        pixel_associations_x = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        pixel_associations_y = cv.CreateMat(self.camera_info.height,
                                            self.camera_info.width,
                                            cv.CV_32SC1)
        cv.Split(pixel_associations, pixel_associations_x,
                 pixel_associations_y, None, None)
        valid_points_mask_x = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_x, -1, valid_points_mask_x, cv.CV_CMP_NE)
        valid_points_mask_y = cv.CreateMat(self.camera_info.height,
                                           self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(pixel_associations_y, -1, valid_points_mask_y, cv.CV_CMP_NE)
        valid_points_mask = cv.CreateMat(self.camera_info.height,
                                         self.camera_info.width, cv.CV_8UC1)
        cv.And(valid_points_mask_x, valid_points_mask_y, valid_points_mask)

        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points,
                                                    cv.CV_32FC2)
        rectified_projector_coordinates = cv.CreateMat(1,
                                                       number_of_valid_points,
                                                       cv.CV_32FC2)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    rectified_projector_coordinates[0, i] = pixel_associations[
                        row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1

        cv.UndistortPoints(rectified_camera_coordinates,
                           rectified_camera_coordinates,
                           camera_model.intrinsicMatrix(),
                           camera_model.distortionCoeffs())

        # Convert scanline numbers to pixel numbers
        cv.AddS(rectified_projector_coordinates, 0.5,
                rectified_projector_coordinates)
        cv.ConvertScale(rectified_projector_coordinates,
                        rectified_projector_coordinates,
                        self.pixels_per_scanline)

        # Rectify the projector pixels
        cv.UndistortPoints(rectified_projector_coordinates,
                           rectified_projector_coordinates,
                           projector_model.intrinsicMatrix(),
                           projector_model.distortionCoeffs())

        camera_rays = projectPixelsTo3dRays(rectified_camera_coordinates,
                                            camera_model)

        projector_rays = projectPixelsTo3dRays(rectified_projector_coordinates,
                                               projector_model)

        # Bring the projector rays into camera coordinates
        cv.Transform(projector_rays, projector_rays,
                     projector_to_camera_rotation_matrix)

        camera_centers = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC3)
        cv.SetZero(camera_centers)

        projector_centers = cv.CreateMat(1, number_of_valid_points,
                                         cv.CV_32FC3)
        cv.Set(projector_centers, self.projector_to_camera_translation_vector)

        intersection_points = line_line_intersections(camera_centers,
                                                      camera_rays,
                                                      projector_centers,
                                                      projector_rays)

        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)

        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg
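
# Sketch (added for clarity; line_line_intersections itself is not shown in this
# excerpt) of what the triangulation step computes for one camera/projector ray pair:
# the camera ray starts at the origin with direction r_c, the projector ray starts at
# the projector centre p0 with direction r_p, and the reconstructed point is taken as
# the midpoint of the shortest segment between the two (generally skew) lines.
import numpy as np

def closest_point_between_rays(c0, r_c, p0, r_p):
    c0, r_c = np.asarray(c0, float), np.asarray(r_c, float)
    p0, r_p = np.asarray(p0, float), np.asarray(r_p, float)
    w0 = c0 - p0
    a, b, c = r_c.dot(r_c), r_c.dot(r_p), r_p.dot(r_p)
    d, e = r_c.dot(w0), r_p.dot(w0)
    denom = a * c - b * b              # ~0 only if the rays are parallel
    s = (b * e - c * d) / denom        # parameter along the camera ray
    t = (a * e - b * d) / denom        # parameter along the projector ray
    return 0.5 * ((c0 + s * r_c) + (p0 + t * r_p))
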
Esempio n. 14
0
class VehicleFilterNode(object):
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()

        self.config = self.setupParam("~config", "baseline")
        self.cali_file_name = self.setupParam("~cali_file_name", "default")
        rospack = rospkg.RosPack()
        self.cali_file = "/code/catkin_ws/src/dt-core/packages/vehicle_detection/config/vehicle_filter_node/" +  \
            self.cali_file_name + ".yaml"
        if not os.path.isfile(self.cali_file):
            rospy.logwarn("[%s] Can't find calibration file: %s.\n" %
                          (self.node_name, self.cali_file))
        self.loadConfig(self.cali_file)

        self.sub_corners = rospy.Subscriber("~corners",
                                            VehicleCorners,
                                            self.processCorners,
                                            queue_size=1)

        self.pub_pose = rospy.Publisher("~pose", VehiclePose, queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.processCameraInfo,
                                         queue_size=1)
        self.pub_time_elapsed = rospy.Publisher("~pose_calculation_time",
                                                Float32,
                                                queue_size=1)
        self.pcm = PinholeCameraModel()
        rospy.loginfo("[%s] Initialization completed" % (self.node_name))
        self.lock = mutex()

    def setupParam(self, param_name, default_value):
        value = rospy.get_param(param_name, default_value)
        rospy.set_param(param_name, value)
        rospy.loginfo("[%s] %s = %s " % (self.node_name, param_name, value))
        return value

    def loadConfig(self, filename):
        stream = file(filename, 'r')
        data = yaml.load(stream)
        stream.close()
        self.distance_between_centers = data['distance_between_centers']
        rospy.loginfo('[%s] distance_between_centers dim : %s' %
                      (self.node_name, self.distance_between_centers))
        self.max_reproj_pixelerror_pose_estimation = data[
            'max_reproj_pixelerror_pose_estimation']
        rospy.loginfo(
            '[%s] max_reproj_pixelerror_pose_estimation : %s' %
            (self.node_name, self.max_reproj_pixelerror_pose_estimation))

    def processCameraInfo(self, camera_info_msg):
        if self.lock.testandset():
            self.pcm.fromCameraInfo(camera_info_msg)
            self.lock.unlock()

    def processCorners(self, vehicle_corners_msg):
        # estimate the relative pose from the detected back-circle pattern
        if self.lock.testandset():
            start = rospy.Time.now()
            # print(start)
            self.calcCirclePattern(vehicle_corners_msg.H,
                                   vehicle_corners_msg.W)
            points = []
            for Point32 in vehicle_corners_msg.corners:
                point = [Point32.x, Point32.y]
                points.append(point)
            points = np.array(points)
            # points = np.reshape(points, (2,-1))
            # print(points)
            # print(self.pcm.distortionCoeffs())
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(self.circlepattern, points,
                                                self.pcm.intrinsicMatrix(),
                                                self.pcm.distortionCoeffs())

            if success:
                points_reproj, _ = cv2.projectPoints(
                    self.circlepattern, rotation_vector, translation_vector,
                    self.pcm.intrinsicMatrix(), self.pcm.distortionCoeffs())
                error = 0
                for i in range(0, len(points_reproj)):
                    error += cv2.norm(points[i], points_reproj[i, 0],
                                      cv2.NORM_L2)

                mean_reproj_error = error / len(points_reproj)
                # print(mean_reproj_error)
                # print(self.max_reproj_pixelerror_pose_estimation)

                if mean_reproj_error < self.max_reproj_pixelerror_pose_estimation:
                    # print(translation_vector)
                    (R, jac) = cv2.Rodrigues(rotation_vector)
                    R_inv = np.transpose(R)
                    translation_vector = -np.dot(R_inv, translation_vector)
                    pose_msg_out = VehiclePose()
                    pose_msg_out.header.stamp = rospy.Time.now()
                    pose_msg_out.rho.data = sqrt(translation_vector[2]**2 +
                                                 translation_vector[0]**2)
                    pose_msg_out.psi.data = np.arctan2(
                        -R_inv[2, 0], sqrt(R_inv[2, 1]**2 + R_inv[2, 2]**2))
                    pose_msg_out.detection.data = vehicle_corners_msg.detection.data
                    R2 = np.array([[
                        cos(pose_msg_out.psi.data), -sin(pose_msg_out.psi.data)
                    ], [
                        sin(pose_msg_out.psi.data),
                        cos(pose_msg_out.psi.data)
                    ]])
                    translation_vector = - \
                        np.array([translation_vector[2],
                                  translation_vector[0]])
                    translation_vector = np.dot(np.transpose(R2),
                                                translation_vector)
                    pose_msg_out.theta.data = np.arctan2(
                        translation_vector[1], translation_vector[0])
                    self.pub_pose.publish(pose_msg_out)
                else:
                    rospy.loginfo(
                        "[%s] Pose estimation failed, too high reprojection error."
                        % (self.node_name))
            else:
                rospy.loginfo("[%s] Pose estimation failed." %
                              (self.node_name))

            elapsed_time = (rospy.Time.now() - start).to_sec()
            self.pub_time_elapsed.publish(elapsed_time)
        self.lock.unlock()
        return

    def calcCirclePattern(self, height, width):
        self.circlepattern_dist = self.distance_between_centers
        self.circlepattern = np.zeros([height * width, 3])
        for i in range(0, width):
            for j in range(0, height):
                self.circlepattern[i + j * width, 0] = self.circlepattern_dist * \
                    i - self.circlepattern_dist * (width - 1) / 2
                self.circlepattern[i + j * width, 1] = self.circlepattern_dist * \
                    j - self.circlepattern_dist * (height - 1) / 2
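
# Standalone sketch (added for clarity) of the pose decomposition used in
# processCorners() above: solvePnP returns the pattern pose in the camera frame;
# inverting it gives the camera position in the pattern frame, from which the planar
# (rho, psi, theta) relative pose is read off. Frame conventions follow the node above.
import numpy as np
import cv2

def planar_pose_from_pnp(rotation_vector, translation_vector):
    R, _ = cv2.Rodrigues(rotation_vector)
    R_inv = R.T
    t = -R_inv.dot(translation_vector).reshape(3)
    rho = np.hypot(t[2], t[0])                                          # planar distance
    psi = np.arctan2(-R_inv[2, 0], np.hypot(R_inv[2, 1], R_inv[2, 2]))  # relative heading
    R2 = np.array([[np.cos(psi), -np.sin(psi)],
                   [np.sin(psi),  np.cos(psi)]])
    planar = R2.T.dot(-np.array([t[2], t[0]]))
    theta = np.arctan2(planar[1], planar[0])                            # bearing
    return rho, psi, theta
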
Esempio n. 15
0
class BackTrackingNode(object):
    def __init__(self):
        self.node_name = rospy.get_name()
        self.bridge = CvBridge()
        self.active = True
        self.currentImg = None

        self.publish_duration = rospy.Duration.from_sec(1.0 / 2.0)
        self.last_stamp = rospy.Time.now()

        #Subscribers and publishers
        self.imagesub = rospy.Subscriber("~image",
                                         CompressedImage,
                                         self.find_marker,
                                         buff_size=921600,
                                         queue_size=1)
        self.movepub = rospy.Publisher("~car_cmd",
                                       Twist2DStamped,
                                       queue_size=10)
        self.imagepub = rospy.Publisher("~image", Image, queue_size=1)
        self.infosub = rospy.Subscriber("~camera_info",
                                        CameraInfo,
                                        self.get_camera_info,
                                        queue_size=1)
        self.camerainfo = PinholeCameraModel()

        rospy.loginfo("[%s] Initialization completed" % (self.node_name))

#get camera info for pinhole camera model

    def get_camera_info(self, camera_msg):
        self.camerainfo.fromCameraInfo(camera_msg)

#step 1 : find the back circle grids using cv2.findCirclesGrid
##### set (x,y) for points and flag for detection

    def find_marker(self, image_msg):

        if not self.active:
            return
        now = rospy.Time.now()
        if now - self.last_stamp < self.publish_duration:
            return
        else:
            self.last_stamp = now

        try:
            self.currentImg = self.bridge.compressed_imgmsg_to_cv2(
                image_msg, "bgr8")
        except CvBridgeError as e:
            print(e)

        gray = cv2.cvtColor(self.currentImg, cv2.COLOR_BGR2GRAY)

        detection, corners = cv2.findCirclesGrid(gray, (7, 3))

        copy = self.currentImg.copy()

        if detection:
            cv2.drawChessboardCorners(copy, (7, 3), corners, detection)

            twoone = []
            for i in range(0, 21):
                point = [corners[i][0][0], corners[i][0][1]]
                twoone.append(point)
            twoone = np.array(twoone)

            self.originalmatrix()
            detection, rvec, tvec = self.gradient(twoone)
            distance(copy, tvec)

        cv2.imshow('detection', copy)
        cv2.waitKey(1)

#alternative step : find the back circle grids using double thresholds blob detector
##### set (x,y) for points and flag for detection
#    def find_marker2(self, image_msg):

#       if not self.active:
#            return

#step 2 : makes matrix for 3d original shape

    def originalmatrix(self):
        #coners and points
        self.originalmtx = np.zeros([21, 3])
        for i in range(0, 7):
            for j in range(0, 3):
                self.originalmtx[i + j * 7, 0] = 0.0125 * i - 0.0125 * 3
                self.originalmtx[i + j * 7, 1] = 0.0125 * j - 0.0125

#step 3 : use intrinsic matrix and solvePnP, return rvec and tvec, print (show) norm

    def gradient(self, imgpts):
        #using solvePnP to find rotation vector and translation vector
        works, rvec, tvec = cv2.solvePnP(self.originalmtx, imgpts,
                                         self.camerainfo.intrinsicMatrix(),
                                         self.camerainfo.distortionCoeffs())

        return works, rvec, tvec
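
# The snippet above calls a helper distance(copy, tvec) that is not included in this
# excerpt. A purely illustrative stand-in (hypothetical, not the original
# implementation) would report the norm of the translation vector as the range to the
# pattern and overlay it on the debug image:
import numpy as np
import cv2

def distance(img, tvec):
    rng = float(np.linalg.norm(tvec))   # metres, since the pattern points are metric
    cv2.putText(img, "%.2f m" % rng, (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    return rng
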
Esempio n. 16
0
class VehicleFilterNode(DTROS):
    """
    The vehicle filter node is responsible for estimating the relative pose to a detected back pattern of a robot and
    for publishing a stop line message if the other vehicle is too close. We model the vehicle in front as a stop line
    and leave the velocity control to the lane control node.

    Args:
        node_name (:obj:`str`): a unique, descriptive name for the node that ROS will use

    Configuration:
        ~distance_between_centers (:obj:`float`): Distance between the centers of the circles on the back pattern, in meters.
        ~max_reproj_pixelerror_pose_estimation (:obj:`float`): Maximum tolerable reprojection error. If a reprojection error higher than this is observed, a stop message will be published.
        ~virtual_stop_line_offset (:obj:`float`): Desired distance from the back pattern of the robot in front, in meters.

    Subscriber:
        ~centers (:obj:`duckietown_msgs.msg.VehicleCorners`): Detected pattern (if any)
        ~camera_info (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera. Needed for rectifying the segments.

    Publishers:
        ~virtual_stop_line (:obj:`duckietown_msgs.msg.StopLineReading`): Message to the lane controller
        ~debug/visualization_marker (:obj:`visualization_msgs.msg.Marker`): Debug topic that publishes an rViz marker
        ~stopped (:obj:`BoolStamped`): Message to FSM state machine to identify that vehicle is stopped
    """
    def __init__(self, node_name):

        # Initialize the DTROS parent class
        super(VehicleFilterNode, self).__init__(node_name=node_name,
                                                node_type=NodeType.PERCEPTION)

        # Add the node parameters to the parameters dictionary and load their default values
        self.distance_between_centers = DTParam('~distance_between_centers',
                                                param_type=ParamType.FLOAT)

        self.max_reproj_pixelerror_pose_estimation = DTParam(
            '~max_reproj_pixelerror_pose_estimation',
            param_type=ParamType.FLOAT)

        self.virtual_stop_line_offset = DTParam('~virtual_stop_line_offset',
                                                param_type=ParamType.FLOAT)

        self.bridge = CvBridge()

        # these will be defined on the first call to calc_circle_pattern
        self.last_calc_circle_pattern = None
        self.circlepattern_dist = None
        self.circlepattern = None
        self.state = None
        # subscribers
        self.sub_centers = rospy.Subscriber("~centers",
                                            VehicleCorners,
                                            self.cb_process_centers,
                                            queue_size=1)
        self.sub_info = rospy.Subscriber("~camera_info",
                                         CameraInfo,
                                         self.cb_process_camera_info,
                                         queue_size=1)
        """ TODO: REMOVE AFTER ROAD_ANOMALY NODE CONSTRUCTION"""
        self.sub_state = rospy.Subscriber("~mode", FSMState, self.cbFSMState)
        # publishers
        self.pub_virtual_stop_line = rospy.Publisher("~virtual_stop_line",
                                                     StopLineReading,
                                                     queue_size=1)
        self.pub_visualize = rospy.Publisher("~debug/visualization_marker",
                                             Marker,
                                             queue_size=1)
        self.pub_stopped_flag = rospy.Publisher("~stopped",
                                                BoolStamped,
                                                queue_size=1)
        self.pcm = PinholeCameraModel()
        self.changePattern = rospy.ServiceProxy("~set_pattern", ChangePattern)
        self.log("Initialization completed")
        self.last_led_state = None

    def cbFSMState(self, msg):
        self.state = msg.state

    def cb_process_camera_info(self, msg):
        """
        Callback that stores the intrinsic calibration into a PinholeCameraModel object.

        Args:

            msg (:obj:`sensor_msgs.msg.CameraInfo`): Intrinsic properties of the camera.
        """

        self.pcm.fromCameraInfo(msg)

    def cb_process_centers(self, vehicle_centers_msg):
        """
        Callback that processes a back pattern detection. If no detection was made, publishes a dummy stop line message.

        Args:
            vehicle_centers_msg (:obj:`duckietown_msgs.msg.VehicleCorners`): Detected pattern (if any)

        """

        # check if there actually was a detection
        detection = vehicle_centers_msg.detection.data
        if detection:
            self.calc_circle_pattern(vehicle_centers_msg.H,
                                     vehicle_centers_msg.W)
            points = np.zeros(
                (vehicle_centers_msg.H * vehicle_centers_msg.W, 2))
            for i in range(len(points)):
                points[i] = np.array([
                    vehicle_centers_msg.corners[i].x,
                    vehicle_centers_msg.corners[i].y
                ])

            success, rotation_vector, translation_vector = cv2.solvePnP(
                objectPoints=self.circlepattern,
                imagePoints=points,
                cameraMatrix=self.pcm.intrinsicMatrix(),
                distCoeffs=self.pcm.distortionCoeffs())

            if success:
                points_reproj, _ = cv2.projectPoints(
                    objectPoints=self.circlepattern,
                    rvec=rotation_vector,
                    tvec=translation_vector,
                    cameraMatrix=self.pcm.intrinsicMatrix(),
                    distCoeffs=self.pcm.distortionCoeffs())

                mean_reproj_error = np.mean(
                    np.sqrt(
                        np.sum((np.squeeze(points_reproj) - points)**2,
                               axis=1)))

                if mean_reproj_error < self.max_reproj_pixelerror_pose_estimation.value:
                    (R, jac) = cv2.Rodrigues(rotation_vector)
                    R_inv = np.transpose(R)
                    translation_vector = -np.dot(R_inv, translation_vector)
                    distance_to_vehicle = -translation_vector[2]

                    # make the message and publish
                    self.publish_stop_line_msg(
                        header=vehicle_centers_msg.header,
                        detected=True,
                        at=distance_to_vehicle <=
                        self.virtual_stop_line_offset.value,
                        x=distance_to_vehicle)

                    if self.pub_visualize.get_num_connections() > 0:
                        marker_msg = Marker()
                        marker_msg.header = vehicle_centers_msg.header
                        marker_msg.header.frame_id = 'donald'
                        marker_msg.ns = 'my_namespace'
                        marker_msg.id = 0
                        marker_msg.type = Marker.CUBE
                        marker_msg.action = Marker.ADD
                        marker_msg.pose.position.x = -translation_vector[2]
                        marker_msg.pose.position.y = translation_vector[0]
                        marker_msg.pose.position.z = translation_vector[1]
                        marker_msg.pose.orientation.x = 0.0
                        marker_msg.pose.orientation.y = 0.0
                        marker_msg.pose.orientation.z = 0.0
                        marker_msg.pose.orientation.w = 1.0
                        marker_msg.scale.x = 0.1
                        marker_msg.scale.y = 0.1
                        marker_msg.scale.z = 0.1
                        marker_msg.color.r = 1.0
                        marker_msg.color.g = 1.0
                        marker_msg.color.b = 0.0
                        marker_msg.color.a = 1.0
                        marker_msg.lifetime = rospy.Duration.from_sec(1)
                        self.pub_visualize.publish(marker_msg)

                else:
                    self.log(
                        "Pose estimation failed, too high reprojection error. "
                        "Reporting detection at 0cm for safety.")
                    self.publish_stop_line_msg(
                        header=vehicle_centers_msg.header,
                        detected=True,
                        at=True)

            else:
                self.log("Pose estimation failed. "
                         "Reporting detection at 0cm for safety.")
                self.publish_stop_line_msg(header=vehicle_centers_msg.header,
                                           detected=True,
                                           at=True)

        else:
            # publish empty messages
            self.publish_stop_line_msg(header=vehicle_centers_msg.header)

            if self.pub_visualize.get_num_connections() > 0:
                marker_msg = Marker()
                marker_msg.header = vehicle_centers_msg.header
                marker_msg.header.frame_id = 'donald'
                marker_msg.ns = 'my_namespace'
                marker_msg.id = 0
                marker_msg.action = Marker.DELETE
                self.pub_visualize.publish(marker_msg)
        try:
            self.trigger_led_hazard_light(
                detection,
                (distance_to_vehicle <= self.virtual_stop_line_offset.value))
        except Exception:
            self.trigger_led_hazard_light(detection, False)

    def calc_circle_pattern(self, height, width):
        """
        Calculates the physical locations of each dot in the pattern.

        Args:
            height (`int`): number of rows in the pattern
            width (`int`): number of columns in the pattern

        """
        # Check whether the previously generated pattern is still valid; if not, or on the first call, regenerate it

        if self.last_calc_circle_pattern is None or self.last_calc_circle_pattern != (
                height, width):
            self.circlepattern_dist = self.distance_between_centers.value
            self.circlepattern = np.zeros([height * width, 3])
            for i in range(0, width):
                for j in range(0, height):
                    self.circlepattern[i + j * width, 0] = self.circlepattern_dist * i - \
                                                           self.circlepattern_dist * (width - 1) / 2
                    self.circlepattern[i + j * width, 1] = self.circlepattern_dist * j - \
                                                           self.circlepattern_dist * (height - 1) / 2

    def trigger_led_hazard_light(self, detection, stopped):
        """
        Publish a service message to trigger the hazard light at the back of the robot
        """
        msg = String()

        if stopped:
            msg.data = "OBSTACLE_STOPPED"
            if msg.data != self.last_led_state:
                self.changePattern(msg)
            self.last_led_state = msg.data
        elif detection:
            msg.data = "OBSTACLE_ALERT"
            if msg.data != self.last_led_state:
                self.changePattern(msg)
            self.last_led_state = msg.data
        else:
            if self.state == "LANE_FOLLOWING":
                msg.data = "CAR_DRIVING"
                if msg.data != self.last_led_state:
                    self.changePattern(msg)
                self.last_led_state = "CAR_DRIVING"
            elif self.state == "NORMAL_JOYSTICK_CONTROL":
                msg.data = "WHITE"
                if msg.data != self.last_led_state:
                    self.changePattern(msg)
                self.last_led_state = msg.data

    def publish_stop_line_msg(self,
                              header,
                              detected=False,
                              at=False,
                              x=0.0,
                              y=0.0):
        """
        Makes and publishes a stop line message.

        Args:
            header: header of a ROS message, usually copied from an incoming message
            detected (`bool`): whether a vehicle has been detected
            at (`bool`): whether we are closer than the desired distance
            x (`float`): distance to the vehicle
            y (`float`): lateral offset from the vehicle (not used, always 0)
        """

        stop_line_msg = StopLineReading()
        stop_line_msg.header = header
        stop_line_msg.stop_line_detected = bool(detected)
        stop_line_msg.at_stop_line = bool(at)
        stop_line_msg.stop_line_point.x = float(x)
        stop_line_msg.stop_line_point.y = float(y)
        self.pub_virtual_stop_line.publish(stop_line_msg)
        """
        Remove once have the road anomaly watcher node
        """
        stopped_flag = BoolStamped()
        stopped_flag.header = header
        stopped_flag.data = bool(at)
        self.pub_stopped_flag.publish(stopped_flag)
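# The reprojection-error check in cb_process_centers above is the heart of the
# pose validation: the solvePnP result is only trusted if projecting the 3D
# pattern back through the estimated pose lands close to the detected image
# points. Below is a minimal, self-contained sketch of that idea; the function
# name, the threshold value and the inputs are illustrative, not part of the
# node above.
import cv2
import numpy as np


def validate_pnp_pose(object_points, image_points, camera_matrix, dist_coeffs,
                      max_mean_error_px=5.0):
    """Return (rvec, tvec) if the PnP solution reprojects well, else None."""
    ok, rvec, tvec = cv2.solvePnP(object_points, image_points,
                                  camera_matrix, dist_coeffs)
    if not ok:
        return None
    reprojected, _ = cv2.projectPoints(object_points, rvec, tvec,
                                       camera_matrix, dist_coeffs)
    # Mean Euclidean distance (in pixels) between detected and reprojected points.
    mean_error = np.mean(
        np.linalg.norm(np.squeeze(reprojected) - image_points, axis=1))
    return (rvec, tvec) if mean_error < max_mean_error_px else None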
Example No. 17
class LandmarkMethod(object):
    def __init__(self):
        self.subjects = dict()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()
        self.model_size_rescale = 30.0
        self.head_pitch = 0.0
        self.margin = rospy.get_param("~margin", 42)
        self.margin_eyes_height = rospy.get_param("~margin_eyes_height", 36)
        self.margin_eyes_width = rospy.get_param("~margin_eyes_width", 60)
        self.interpupillary_distance = 0.058
        self.cropped_face_size = (rospy.get_param("~face_size_height", 224),
                                  rospy.get_param("~face_size_width", 224))
        self.gpu_id = rospy.get_param("~gpu_id", default="0")
        torch.cuda.set_device(self.gpu_id)
        rospy.loginfo("Using GPU for landmark: {}".format(self.gpu_id))

        self.rgb_frame_id = rospy.get_param("~rgb_frame_id", "/kinect2_link")
        self.rgb_frame_id_ros = rospy.get_param("~rgb_frame_id_ros",
                                                "/kinect2_nonrotated_link")

        self.model_points = None
        self.eye_image_size = (rospy.get_param("~eye_image_height", 36),
                               rospy.get_param("~eye_image_width", 60))

        self.tf_broadcaster = TransformBroadcaster()
        self.tf_listener = TransformListener()
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")

        self.use_previous_headpose_estimate = True
        self.last_rvec = {}
        self.last_tvec = {}
        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            tqdm.write("Wait for camera message")
            cam_info = rospy.wait_for_message("/camera_info",
                                              CameraInfo,
                                              timeout=None)
            self.img_proc = PinholeCameraModel()
            # noinspection PyTypeChecker
            self.img_proc.fromCameraInfo(cam_info)
            if np.array_equal(
                    self.img_proc.intrinsicMatrix(),
                    np.matrix([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is zero-matrix. Did you calibrate'
                    'the camera and linked to the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # multiple person images publication
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=1)
        # multiple person faces publication for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=1)

        self.model_points = self._get_full_model_points()

        self.sess_bb = None
        self.face_net = FaceDetector(device="cuda:{}".format(self.gpu_id))

        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.callback,
                                          buff_size=2**24,
                                          queue_size=1)

        self.facial_landmark_nn = face_alignment.FaceAlignment(
            landmarks_type=face_alignment.LandmarksType._2D,
            device="cuda:{}".format(self.gpu_id),
            flip_input=False)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

    def _dyn_reconfig_callback(self, config, level):
        self.model_points /= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.model_size_rescale = config["model_size"]
        self.interpupillary_distance = config["interpupillary_distance"]
        self.model_points *= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.head_pitch = config["head_pitch"]
        return config

    def _get_full_model_points(self):
        """Get all 68 3D model points from file"""
        raw_value = []
        filename = rospkg.RosPack().get_path(
            'rt_gene') + '/model_nets/face_model_68.txt'
        with open(filename) as f:
            for line in f:
                raw_value.append(line)
        model_points = np.array(raw_value, dtype=np.float32)
        model_points = np.reshape(model_points, (3, -1)).T
        # model_points *= 4
        model_points[:, -1] *= -1

        # Scale the model points according to the interpupillary distance and the rescale factor.
        model_points = model_points * (self.interpupillary_distance *
                                       self.model_size_rescale)

        return model_points

    def get_face_bb(self, image):
        faceboxes = []

        start_time = time.time()
        fraction = 4
        image = scipy.misc.imresize(image, 1.0 / fraction)
        detections = self.face_net.detect_from_image(image)
        tqdm.write("Face Detector Frequency: {:.2f}Hz".format(
            1 / (time.time() - start_time)))

        for result in detections:
            x_left_top, y_left_top, x_right_bottom, y_right_bottom, confidence = result * fraction

            if confidence > 0.8 * fraction:
                box = [x_left_top, y_left_top, x_right_bottom, y_right_bottom]
                diff_height_width = (box[3] - box[1]) - (box[2] - box[0])
                offset_y = int(abs(diff_height_width / 2))
                box_moved = self.move_box(box, [0, offset_y])

                # Make box square.
                facebox = self.get_square_box(box_moved)
                faceboxes.append(facebox)

        return faceboxes

    @staticmethod
    def move_box(box, offset):
        """Move the box to direction specified by vector offset"""
        left_x = box[0] + offset[0]
        top_y = box[1] + offset[1]
        right_x = box[2] + offset[0]
        bottom_y = box[3] + offset[1]

        return [left_x, top_y, right_x, bottom_y]

    @staticmethod
    def box_in_image(box, image):
        """Check if the box is in image"""
        rows = image.shape[0]
        cols = image.shape[1]

        return box[0] >= 0 and box[1] >= 0 and box[2] <= cols and box[3] <= rows

    @staticmethod
    def get_square_box(box):
        """Get a square box out of the given box, by expanding it."""
        left_x = box[0]
        top_y = box[1]
        right_x = box[2]
        bottom_y = box[3]

        box_width = right_x - left_x
        box_height = bottom_y - top_y

        # Check if box is already a square. If not, make it a square.
        diff = box_height - box_width
        delta = int(abs(diff) / 2)

        if diff == 0:  # Already a square.
            return box
        elif diff > 0:  # Height > width, a slim box.
            left_x -= delta
            right_x += delta
            if diff % 2 == 1:
                right_x += 1
        else:  # Width > height, a short box.
            top_y -= delta
            bottom_y += delta
            if diff % 2 == 1:
                bottom_y += 1

        return [left_x, top_y, right_x, bottom_y]

    def process_image(self, color_msg):
        tqdm.write('Time now: ' + str(rospy.Time.now().to_sec()) +
                   ' message color: ' + str(color_msg.header.stamp.to_sec()) +
                   ' diff: ' + str(rospy.Time.now().to_sec() -
                                   color_msg.header.stamp.to_sec()))

        start_time = time.time()

        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.detect_landmarks(color_img, timestamp)  # update self.subjects

        tqdm.write('Elapsed after detecting transformed_landmarks: ' +
                   str(time.time() - start_time))

        if not self.subjects:
            tqdm.write("No face found")
            return

        self.get_eye_image()  # update self.subjects

        final_head_pose_images = None
        for subject_id, subject in self.subjects.items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
            if subject_id not in self.last_rvec:
                self.last_rvec[subject_id] = np.array([[0.01891013],
                                                       [0.08560084],
                                                       [-3.14392813]])
            if subject_id not in self.last_tvec:
                self.last_tvec[subject_id] = np.array([[-14.97821226],
                                                       [-10.62040383],
                                                       [-2053.03596872]])
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, rotation_vector, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            # Publish all the data
            translation_headpose_tf = self.get_head_translation(
                timestamp, subject_id)

            if success:
                if translation_headpose_tf is not None:
                    euler_angles_head = self.publish_pose(
                        timestamp, translation_headpose_tf, rotation_vector,
                        subject_id)

                    if euler_angles_head is not None:
                        head_pose_image = self.visualize_headpose_result(
                            subject.face_color,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head))
                        head_pose_image_resized = cv2.resize(
                            head_pose_image,
                            dsize=(224, 224),
                            interpolation=cv2.INTER_CUBIC)

                        if final_head_pose_images is None:
                            final_head_pose_images = head_pose_image_resized
                        else:
                            final_head_pose_images = np.concatenate(
                                (final_head_pose_images,
                                 head_pose_image_resized),
                                axis=1)
            else:
                if not success:
                    tqdm.write("Could not get head pose properly")

        if final_head_pose_images is not None:
            self.publish_subject_list(timestamp, self.subjects)
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                final_head_pose_images, "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

        tqdm.write('Elapsed total: ' + str(time.time() - start_time) + '\n\n')

    def get_head_pose(self, landmarks, subject_id):
        """
        We are given a set of 2D points in the form of landmarks. The basic idea is that we assume a generic 3D head
        model, and try to fit the 2D points to the 3D model based on the Levenberg-Marquardt optimization. We can use
        OpenCV's implementation of SolvePnP for this.
        This method is inspired by http://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
        We are planning to replace this with our own head pose estimator.
        :param landmarks: Landmark positions to be used to determine head pose
        :return: success - Whether the pose was successfully extracted
                 rotation_vector - rotation vector that along with translation vector brings points from the model
                 coordinate system to the camera coordinate system
                 translation_vector - see rotation_vector
        """

        image_points_headpose = landmarks

        camera_matrix = self.img_proc.intrinsicMatrix()
        dist_coeffs = self.img_proc.distortionCoeffs()

        try:
            if self.last_rvec[subject_id] is not None and self.last_tvec[
                    subject_id] is not None and self.use_previous_headpose_estimate:
                (success, rotation_vector_unstable, translation_vector_unstable) = \
                    cv2.solvePnP(self.model_points, image_points_headpose, camera_matrix, dist_coeffs,
                                 flags=cv2.SOLVEPNP_ITERATIVE, useExtrinsicGuess=True,
                                 rvec=self.last_rvec[subject_id], tvec=self.last_tvec[subject_id])
            else:
                (success, rotation_vector_unstable, translation_vector_unstable) = \
                    cv2.solvePnP(self.model_points, image_points_headpose, camera_matrix, dist_coeffs,
                                 flags=cv2.SOLVEPNP_ITERATIVE)
        except Exception:
            print('Could not estimate head pose')
            return False, None, None

        if not success:
            print('Could not estimate head pose')
            return False, None, None

        # Apply Kalman filter
        stable_pose = []
        pose_np = np.array(
            (rotation_vector_unstable, translation_vector_unstable)).flatten()
        for value, ps_stb in zip(pose_np, self.pose_stabilizers[subject_id]):
            ps_stb.update([value])
            stable_pose.append(ps_stb.state[0])

        stable_pose = np.reshape(stable_pose, (-1, 3))
        rotation_vector = stable_pose[0]
        translation_vector = stable_pose[1]

        self.last_rvec[subject_id] = rotation_vector
        self.last_tvec[subject_id] = translation_vector

        rotation_vector_swapped = [
            -rotation_vector[2], -rotation_vector[1] + np.pi + self.head_pitch,
            rotation_vector[0]
        ]
        rot_head = tf_transformations.quaternion_from_euler(
            *rotation_vector_swapped)

        return success, rot_head, translation_vector

    def publish_subject_list(self, timestamp, subjects):
        assert (subjects is not None)

        subject_list_message = self.__subject_bridge.images_to_msg(
            subjects, timestamp)

        self.subject_pub.publish(subject_list_message)

    @staticmethod
    def visualize_headpose_result(face_image, est_headpose):
        """Here, we take the original eye eye_image and overlay the estimated gaze."""
        output_image = np.copy(face_image)

        center_x = face_image.shape[1] / 2
        center_y = face_image.shape[0] / 2

        endpoint_x, endpoint_y = gaze_tools.get_endpoint(
            est_headpose[1], est_headpose[0], center_x, center_y, 100)

        cv2.line(output_image, (int(center_x), int(center_y)),
                 (int(endpoint_x), int(endpoint_y)), (0, 0, 255), 3)
        return output_image

    def get_head_translation(self, timestamp, subject_id):
        trans_reshaped = self.last_tvec[subject_id].reshape(3, 1)
        nose_center_3d_rot = [
            -float(trans_reshaped[0] / 1000.0),
            -float(trans_reshaped[1] / 1000.0),
            -float(trans_reshaped[2] / 1000.0)
        ]

        nose_center_3d_rot_frame = self.rgb_frame_id

        try:
            nose_center_3d_rot_pt = PointStamped()
            nose_center_3d_rot_pt.header.frame_id = nose_center_3d_rot_frame
            nose_center_3d_rot_pt.header.stamp = timestamp
            nose_center_3d_rot_pt.point = Point(x=nose_center_3d_rot[0],
                                                y=nose_center_3d_rot[1],
                                                z=nose_center_3d_rot[2])
            nose_center_3d = self.tf_listener.transformPoint(
                self.rgb_frame_id_ros, nose_center_3d_rot_pt)
            nose_center_3d.header.stamp = timestamp

            nose_center_3d_tf = gaze_tools.position_ros_to_tf(
                nose_center_3d.point)

            print('Translation based on landmarks', nose_center_3d_tf)
            return nose_center_3d_tf
        except ExtrapolationException as e:
            print(e)
            return None

    def publish_pose(self, timestamp, nose_center_3d_tf, rot_head, subject_id):
        self.tf_broadcaster.sendTransform(
            nose_center_3d_tf, rot_head, timestamp,
            self.tf_prefix + "/head_pose_estimated" + str(subject_id),
            self.rgb_frame_id_ros)

        return gaze_tools.get_head_pose(nose_center_3d_tf, rot_head)

    def callback(self, color_msg):
        """Simply call process_image."""
        try:
            self.process_image(color_msg)

        except CvBridgeError as e:
            print(e)
        except rospy.ROSException as e:
            if str(e) == "publish() to a closed topic":
                pass
            else:
                raise e

    def __update_subjects(self, new_faceboxes):
        """
        Assign the new faces to the existing subjects (id tracking)
        :param new_faceboxes: new faceboxes detected
        :return: update self.subjects
        """
        assert (self.subjects is not None)
        assert (new_faceboxes is not None)

        if len(new_faceboxes) == 0:
            self.subjects = dict()
            return

        if len(self.subjects) == 0:
            for j, b_new in enumerate(new_faceboxes):
                self.subjects[j] = SubjectDetected(b_new)
            return

        distance_matrix = np.ones((len(self.subjects), len(new_faceboxes)))
        for i, subject in enumerate(self.subjects.values()):
            for j, b_new in enumerate(new_faceboxes):
                distance_matrix[i][j] = np.sqrt(
                    np.mean(
                        ((np.array(subject.face_bb) - np.array(b_new))**2)))
        ids_to_assign = range(len(new_faceboxes))
        self.subjects = dict()
        for id in ids_to_assign:
            subject = np.argmin(distance_matrix[:, id])
            while subject in self.subjects:
                subject += 1
            self.subjects[subject] = SubjectDetected(new_faceboxes[id])

    def __detect_landmarks_one_box(self, facebox, color_img):
        try:
            _bb = list(map(int, facebox))
            face_img = color_img[_bb[1]:_bb[3], _bb[0]:_bb[2]]
            marks_orig = np.array(
                self.__detect_facial_landmarks(color_img, facebox)[0])

            eye_indices = np.array([36, 39, 42, 45])

            transformed_landmarks = marks_orig[eye_indices]
            transformed_landmarks[:, 0] -= facebox[0]
            transformed_landmarks[:, 1] -= facebox[1]

            return face_img, transformed_landmarks, marks_orig
        except Exception:
            return None, None, None

    def __detect_facial_landmarks(self, color_img, facebox):
        marks = self.facial_landmark_nn.get_landmarks(color_img,
                                                      detected_faces=[facebox])
        return marks

    def detect_landmarks(self, color_img, timestamp):
        faceboxes = self.get_face_bb(color_img)

        self.__update_subjects(faceboxes)

        for subject in self.subjects.values():
            res = self.__detect_landmarks_one_box(subject.face_bb, color_img)
            subject.face_color = res[0]
            subject.transformed_landmarks = res[1]
            subject.marks = res[2]

    def __get_eye_image_one(self, transformed_landmarks, face_aligned_color):
        margin_ratio = 1.0

        try:
            # Get the width of the eye, and compute how big the margin should be according to the width
            lefteye_width = transformed_landmarks[3][
                0] - transformed_landmarks[2][0]
            righteye_width = transformed_landmarks[1][
                0] - transformed_landmarks[0][0]
            lefteye_margin, righteye_margin = lefteye_width * margin_ratio, righteye_width * margin_ratio

            # lefteye_center_x = transformed_landmarks[2][0] + lefteye_width / 2
            # righteye_center_x = transformed_landmarks[0][0] + righteye_width / 2
            lefteye_center_y = (transformed_landmarks[2][1] +
                                transformed_landmarks[3][1]) / 2
            righteye_center_y = (transformed_landmarks[1][1] +
                                 transformed_landmarks[0][1]) / 2

            desired_ratio = self.eye_image_size[0] / self.eye_image_size[1] / 2

            # Now compute the bounding boxes
            # The left / right x-coordinates are computed as the landmark position plus/minus the margin
            # The bottom / top y-coordinates are computed according to the desired ratio, as the width of the image is known
            left_bb = np.zeros(4, dtype=np.int32)
            left_bb[0] = transformed_landmarks[2][0] - lefteye_margin / 2
            left_bb[1] = lefteye_center_y - (
                lefteye_width + lefteye_margin) * desired_ratio * 1.25
            left_bb[2] = transformed_landmarks[3][0] + lefteye_margin / 2
            left_bb[3] = lefteye_center_y + (
                lefteye_width + lefteye_margin) * desired_ratio * 1.25

            right_bb = np.zeros(4, dtype=np.int32)
            right_bb[0] = transformed_landmarks[0][0] - righteye_margin / 2
            right_bb[1] = righteye_center_y - (
                righteye_width + righteye_margin) * desired_ratio * 1.25
            right_bb[2] = transformed_landmarks[1][0] + righteye_margin / 2
            right_bb[3] = righteye_center_y + (
                righteye_width + righteye_margin) * desired_ratio * 1.25

            # Extract the eye images from the aligned image
            left_eye_color = face_aligned_color[left_bb[1]:left_bb[3],
                                                left_bb[0]:left_bb[2], :]
            right_eye_color = face_aligned_color[right_bb[1]:right_bb[3],
                                                 right_bb[0]:right_bb[2], :]

            # for p in transformed_landmarks:  # For debug visualization only
            #     cv2.circle(face_aligned_color, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

            # So far, we have only ensured that the ratio is correct. Now, resize it to the desired size.
            left_eye_color_resized = scipy.misc.imresize(left_eye_color,
                                                         self.eye_image_size,
                                                         interp='lanczos')
            right_eye_color_resized = scipy.misc.imresize(right_eye_color,
                                                          self.eye_image_size,
                                                          interp='lanczos')
        except (ValueError, TypeError) as e:
            print(e)
            return None, None, None, None
        return left_eye_color_resized, right_eye_color_resized, left_bb, right_bb

    # noinspection PyUnusedLocal
    def get_eye_image(self):
        """Extract the left and right eye images given the (dlib) transformed_landmarks and the source image.
        First, align the face. Then, extract the width of the eyes given the landmark positions.
        The height of the images is computed according to the desired ratio of the eye images."""

        start_time = time.time()
        for subject in self.subjects.values():
            res = self.__get_eye_image_one(subject.transformed_landmarks,
                                           subject.face_color)
            subject.left_eye_color = res[0]
            subject.right_eye_color = res[1]
            subject.left_eye_bb = res[2]
            subject.right_eye_bb = res[3]

        tqdm.write('New get_eye_image time: ' + str(time.time() - start_time))

    @staticmethod
    def get_image_points_eyes_nose(landmarks):
        landmarks_x, landmarks_y = landmarks.T[0], landmarks.T[1]

        left_eye_center_x = landmarks_x[42] + (landmarks_x[45] -
                                               landmarks_x[42]) / 2.0
        left_eye_center_y = (landmarks_y[42] + landmarks_y[45]) / 2.0
        right_eye_center_x = landmarks_x[36] + (landmarks_x[40] -
                                                landmarks_x[36]) / 2.0
        right_eye_center_y = (landmarks_y[36] + landmarks_y[40]) / 2.0
        nose_center_x, nose_center_y = (landmarks_x[33] + landmarks_x[31] + landmarks_x[35]) / 3.0, \
                                       (landmarks_y[33] + landmarks_y[31] + landmarks_y[35]) / 3.0

        return (nose_center_x, nose_center_y), \
               (left_eye_center_x, left_eye_center_y), (right_eye_center_x, right_eye_center_y)
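# The head pose returned by solvePnP above is smoothed component-wise: each of
# the six scalars (three rotation, three translation) is fed through its own
# Stabilizer instance. The Stabilizer class itself is project-specific; the
# sketch below is only a stand-in showing the same per-component pattern with
# plain exponential smoothing (alpha is an assumed tuning value).
import numpy as np


class ScalarSmoother(object):
    def __init__(self, alpha=0.5):
        self.alpha = alpha  # assumed smoothing factor, 0 < alpha <= 1
        self.state = None

    def update(self, value):
        if self.state is None:
            self.state = value
        else:
            self.state = self.alpha * value + (1.0 - self.alpha) * self.state
        return self.state


def smooth_pose(rvec, tvec, smoothers):
    """Smooth a 6-DoF pose with one ScalarSmoother per component."""
    pose = np.concatenate([np.ravel(rvec), np.ravel(tvec)])
    smoothed = np.array([s.update(v) for s, v in zip(smoothers, pose)])
    return smoothed[:3], smoothed[3:]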
Example No. 18
class LandmarkMethodROS(LandmarkMethodBase):
    def __init__(self, img_proc=None):
        super(LandmarkMethodROS,
              self).__init__(device_id_facedetection=rospy.get_param(
                  "~device_id_facedetection", default="cuda:0"))
        self.subject_tracker = FaceEncodingTracker() if rospy.get_param(
            "~use_face_encoding_tracker",
            default=True) else SequentialTracker()
        self.bridge = CvBridge()
        self.__subject_bridge = SubjectListBridge()

        self.camera_frame = rospy.get_param("~camera_frame", "kinect2_link")
        self.ros_tf_frame = rospy.get_param("~ros_tf_frame",
                                            "kinect2_ros_frame")

        self.tf2_broadcaster = TransformBroadcaster()
        self.tf2_buffer = Buffer()
        self.tf2_listener = TransformListener(self.tf2_buffer)
        self.tf_prefix = rospy.get_param("~tf_prefix", default="gaze")
        self.visualise_headpose = rospy.get_param("~visualise_headpose",
                                                  default=True)

        self.pose_stabilizers = {}  # Introduce scalar stabilizers for pose.

        try:
            if img_proc is None:
                tqdm.write("Wait for camera message")
                cam_info = rospy.wait_for_message("/camera_info",
                                                  CameraInfo,
                                                  timeout=None)
                self.img_proc = PinholeCameraModel()
                # noinspection PyTypeChecker
                self.img_proc.fromCameraInfo(cam_info)
            else:
                self.img_proc = img_proc

            if np.array_equal(
                    self.img_proc.intrinsicMatrix().A,
                    np.array([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])):
                raise Exception(
                    'Camera matrix is zero-matrix. Did you calibrate'
                    'the camera and linked to the yaml file in the launch file?'
                )
            tqdm.write("Camera message received")
        except rospy.ROSException:
            raise Exception("Could not get camera info")

        # multiple person images publication
        self.subject_pub = rospy.Publisher("/subjects/images",
                                           MSG_SubjectImagesList,
                                           queue_size=3)
        # multiple person faces publication for visualisation
        self.subject_faces_pub = rospy.Publisher("/subjects/faces",
                                                 Image,
                                                 queue_size=3)

        Server(ModelSizeConfig, self._dyn_reconfig_callback)

        # have the subscriber the last thing that's run to avoid conflicts
        self.color_sub = rospy.Subscriber("/image",
                                          Image,
                                          self.process_image,
                                          buff_size=2**24,
                                          queue_size=3)

    def _dyn_reconfig_callback(self, config, _):
        self.model_points /= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.model_size_rescale = config["model_size"]
        self.interpupillary_distance = config["interpupillary_distance"]
        self.model_points *= (self.model_size_rescale *
                              self.interpupillary_distance)
        self.head_pitch = config["head_pitch"]
        return config

    def process_image(self, color_msg):
        color_img = gaze_tools.convert_image(color_msg, "bgr8")
        timestamp = color_msg.header.stamp

        self.update_subject_tracker(color_img)

        if not self.subject_tracker.get_tracked_elements():
            tqdm.write("\033[2K\033[1;31mNo face found\033[0m", end="\r")
            return

        self.subject_tracker.update_eye_images(self.eye_image_size)

        final_head_pose_images = []
        for subject_id, subject in self.subject_tracker.get_tracked_elements(
        ).items():
            if subject.left_eye_color is None or subject.right_eye_color is None:
                continue
            if subject_id not in self.pose_stabilizers:
                self.pose_stabilizers[subject_id] = [
                    Stabilizer(state_num=2,
                               measure_num=1,
                               cov_process=0.1,
                               cov_measure=0.1) for _ in range(6)
                ]

            success, head_rpy, translation_vector = self.get_head_pose(
                subject.marks, subject_id)

            if success:
                # Publish all the data
                self.publish_pose(timestamp, translation_vector, head_rpy,
                                  subject_id)

                if self.visualise_headpose:
                    # pitch roll yaw
                    trans_msg = self.tf2_buffer.lookup_transform(
                        self.ros_tf_frame, self.tf_prefix +
                        "/head_pose_estimated" + str(subject_id), timestamp)
                    rotation = trans_msg.transform.rotation
                    euler_angles_head = list(
                        transformations.euler_from_quaternion(
                            [rotation.x, rotation.y, rotation.z, rotation.w]))
                    euler_angles_head = gaze_tools.limit_yaw(euler_angles_head)

                    face_image_resized = cv2.resize(
                        subject.face_color,
                        dsize=(224, 224),
                        interpolation=cv2.INTER_CUBIC)

                    final_head_pose_images.append(
                        LandmarkMethodROS.visualize_headpose_result(
                            face_image_resized,
                            gaze_tools.get_phi_theta_from_euler(
                                euler_angles_head)))

        if len(self.subject_tracker.get_tracked_elements().items()) > 0:
            self.publish_subject_list(
                timestamp, self.subject_tracker.get_tracked_elements())

        if len(final_head_pose_images) > 0:
            headpose_image_ros = self.bridge.cv2_to_imgmsg(
                np.hstack(final_head_pose_images), "bgr8")
            headpose_image_ros.header.stamp = timestamp
            self.subject_faces_pub.publish(headpose_image_ros)

    def get_head_pose(self, landmarks, subject_id):
        """
        We are given a set of 2D points in the form of landmarks. The basic idea is that we assume a generic 3D head
        model, and try to fit the 2D points to the 3D model based on the Levenberg-Marquardt optimization. We can use
        OpenCV's implementation of SolvePnP for this.
        This method is inspired by http://www.learnopencv.com/head-pose-estimation-using-opencv-and-dlib/
        We are planning to replace this with our own head pose estimator.
        :param landmarks: Landmark positions to be used to determine head pose
        :param subject_id: ID of the subject that corresponds to the given landmarks
        :return: success - Whether the pose was successfully extracted
                 rotation_vector - rotation vector that along with translation vector brings points from the model
                 coordinate system to the camera coordinate system
                 translation_vector - see rotation_vector
        """

        camera_matrix = self.img_proc.intrinsicMatrix()
        dist_coeffs = self.img_proc.distortionCoeffs()

        try:
            success, rodrigues_rotation, translation_vector, _ = cv2.solvePnPRansac(
                self.model_points,
                landmarks.reshape(len(self.model_points), 1, 2),
                cameraMatrix=camera_matrix,
                distCoeffs=dist_coeffs,
                flags=cv2.SOLVEPNP_DLS)

        except cv2.error as e:
            tqdm.write(
                '\033[2K\033[1;31mCould not estimate head pose: {}\033[0m'.
                format(e),
                end="\r")
            return False, None, None

        if not success:
            tqdm.write(
                '\033[2K\033[1;31mUnsuccessful in solvePnPRansac\033[0m',
                end="\r")
            return False, None, None

        # This is a generic pose stabiliser; the underlying representation doesn't matter
        rotation_vector, translation_vector = self.apply_kalman_filter_head_pose(
            subject_id, rodrigues_rotation, translation_vector / 1000.0)

        rotation_vector[0] += self.head_pitch

        _rotation_matrix, _ = cv2.Rodrigues(rotation_vector)
        _rotation_matrix = np.matmul(
            _rotation_matrix, np.array([[0, 1, 0], [0, 0, -1], [-1, 0, 0]]))
        _m = np.zeros((4, 4))
        _m[:3, :3] = _rotation_matrix
        _m[3, 3] = 1
        _rpy_rotation = np.array(transformations.euler_from_matrix(_m))

        return success, _rpy_rotation, translation_vector

    def apply_kalman_filter_head_pose(self, subject_id,
                                      rotation_vector_unstable,
                                      translation_vector_unstable):
        stable_pose = []
        pose_np = np.array(
            (rotation_vector_unstable, translation_vector_unstable)).flatten()
        for value, ps_stb in zip(pose_np, self.pose_stabilizers[subject_id]):
            ps_stb.update([value])
            stable_pose.append(ps_stb.state[0])
        stable_pose = np.reshape(stable_pose, (-1, 3))
        rotation_vector = stable_pose[0]
        translation_vector = stable_pose[1]
        return rotation_vector, translation_vector

    def publish_subject_list(self, timestamp, subjects):
        assert (subjects is not None)

        subject_list_message = self.__subject_bridge.images_to_msg(
            subjects, timestamp)

        self.subject_pub.publish(subject_list_message)

    def publish_pose(self, timestamp, nose_center_3d_tf, head_rpy, subject_id):
        t = TransformStamped()
        t.header.frame_id = self.camera_frame
        t.header.stamp = timestamp
        t.child_frame_id = self.tf_prefix + "/head_pose_estimated" + str(
            subject_id)
        t.transform.translation.x = nose_center_3d_tf[0]
        t.transform.translation.y = nose_center_3d_tf[1]
        t.transform.translation.z = nose_center_3d_tf[2]

        rotation = transformations.quaternion_from_euler(*head_rpy)
        t.transform.rotation.x = rotation[0]
        t.transform.rotation.y = rotation[1]
        t.transform.rotation.z = rotation[2]
        t.transform.rotation.w = rotation[3]

        try:
            self.tf2_broadcaster.sendTransform([t])
        except rospy.ROSException as exc:
            if str(exc) == "publish() to a closed topic":
                pass
            else:
                raise exc

        self.tf2_buffer.set_transform(t, 'extract_landmarks')

    def update_subject_tracker(self, color_img):
        faceboxes = self.get_face_bb(color_img)
        if len(faceboxes) == 0:
            self.subject_tracker.clear_elements()
            return

        tracked_subjects = self.get_subjects_from_faceboxes(
            color_img, faceboxes)

        # track the new faceboxes according to the previous ones
        self.subject_tracker.track(tracked_subjects)
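# get_head_pose above turns the solvePnP rotation vector into roll/pitch/yaw by
# going through a rotation matrix and a homogeneous transform. A condensed
# sketch of that conversion, assuming ROS's tf.transformations is available as
# in the node above (the extra axis-permutation applied by the node is
# application-specific and omitted here):
import cv2
import numpy as np
from tf import transformations


def rodrigues_to_rpy(rotation_vector):
    """Convert an axis-angle (Rodrigues) rotation vector to Euler angles."""
    rotation_matrix, _ = cv2.Rodrigues(
        np.asarray(rotation_vector, dtype=np.float64))
    homogeneous = np.eye(4)
    homogeneous[:3, :3] = rotation_matrix
    return np.array(transformations.euler_from_matrix(homogeneous))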
    def get_point_cloud(self):

        # Scan the scene
        col_associations = self.get_projector_line_associations()

        # Project white light onto the scene to get the intensities of each pixel (for coloring our point cloud)
        illumination_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.Set(illumination_projection, 255)
        intensities_image = self.get_picture_of_projection(illumination_projection)

        # Turn projector off when done with it
        off_projection = cv.CreateMat(self.projector_info.height, self.projector_info.width, cv.CV_8UC1)
        cv.SetZero(off_projection)
        off_image_message = self.bridge.cv_to_imgmsg(off_projection, encoding="mono8")
        self.set_projection(off_image_message)

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(self.camera_info)
        
        valid_points_mask = cv.CreateMat(self.camera_info.height, self.camera_info.width, cv.CV_8UC1)
        cv.CmpS(col_associations, -1, valid_points_mask, cv.CV_CMP_NE)
        
        number_of_valid_points = cv.CountNonZero(valid_points_mask)

        rectified_camera_coordinates = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC2)
        scanline_associations = cv.CreateMat(1, number_of_valid_points, cv.CV_32FC1)
        intensities = cv.CreateMat(1, number_of_valid_points, cv.CV_8UC1)
        i = 0
        for row in range(self.camera_info.height):
            for col in range(self.camera_info.width):
                if valid_points_mask[row, col] != 0:
                    rectified_camera_coordinates[0, i] = (col, row)
                    scanline_associations[0, i] = col_associations[row, col]
                    intensities[0, i] = intensities_image[row, col]
                    i += 1
       
        cv.UndistortPoints(rectified_camera_coordinates, rectified_camera_coordinates, camera_model.intrinsicMatrix(), camera_model.distortionCoeffs())

        # Convert scanline numbers to plane normal vectors
        planes = self.scanline_numbers_to_planes(scanline_associations)

        camera_rays = project_pixels_to_3d_rays(rectified_camera_coordinates, camera_model)
        
        intersection_points = ray_plane_intersections(camera_rays, planes)
        
        self.point_cloud_msg = sensor_msgs.msg.PointCloud()
        self.point_cloud_msg.header.stamp = rospy.Time.now()
        self.point_cloud_msg.header.frame_id = 'base_link'
        channels = []
        channel = sensor_msgs.msg.ChannelFloat32()
        channel.name = "intensity"
        points = []
        intensities_list = []

        for i in range(number_of_valid_points):
            point = geometry_msgs.msg.Point32()
            point.x = intersection_points[0, i][0]
            point.y = intersection_points[0, i][1]
            point.z = intersection_points[0, i][2]
            if point.z < 0:
                print(scanline_associations[0, i])
                print(rectified_camera_coordinates[0, i])
                
            points.append(point)
            intensity = intensities[0, i]
            intensities_list.append(intensity)
     
        channel.values = intensities_list
        channels.append(channel)
        self.point_cloud_msg.channels = channels
        self.point_cloud_msg.points = points

        return self.point_cloud_msg
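# The triangulation step above relies on intersecting camera rays with the
# projector scanline planes (the ray_plane_intersections helper). A small
# stand-alone sketch of that geometry, assuming rays start at the camera
# centre (the origin) and each plane is given by a normal vector and a point
# lying on it:
import numpy as np


def intersect_ray_with_plane(ray_dir, plane_normal, plane_point):
    """Return the 3D intersection point, or None if the ray misses the plane."""
    denom = np.dot(plane_normal, ray_dir)
    if abs(denom) < 1e-9:
        return None  # ray is (nearly) parallel to the plane
    t = np.dot(plane_normal, plane_point) / denom
    if t <= 0:
        return None  # intersection lies behind the camera
    return t * np.asarray(ray_dir, dtype=float)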
Example No. 20
class BuoyRecognitionNode(object):
    def __init__(self):
        self._tf_buffer = tf2_ros.Buffer(
            rospy.Duration(1200.0))  # tf buffer length
        self._tf_listener = tf2_ros.TransformListener(self._tf_buffer)

        self._camera_input_image_raw_topic = rospy.get_param(
            "~camera_input_image_raw_topic",
            "/cora/sensors/cameras/front_left_camera/image_raw")
        self._camera_input_camera_info_topic = rospy.get_param(
            "~camera_input_camera_info_topic",
            "/cora/sensors/cameras/front_left_camera/camera_info")
        self._tagged_image_output_topic = rospy.get_param(
            "~tagged_image_output_topic", "/tij/camera/tagged_image")
        self._point_cloud_data_topic = rospy.get_param(
            "~pointcloud_data", "/cora/sensors/lidars/front_lidar/points")
        self._buoy_markers_topic = rospy.get_param("~buoy_markers_topic",
                                                   "/tij/markers/buoys")
        self._detection_array_topic = rospy.get_param(
            "~detection_array_topic", "/tij/detections/detection_array")

        self._buoy_color_samples_database_file = rospy.get_param(
            "~buoy_color_samples_database_file", "samples.csv")
        self._depth_sensor_min_distance = rospy.get_param(
            "~depth_sensor_min_distance", 5)
        self._depth_sensor_max_distance = rospy.get_param(
            "~depth_sensor_max_distance", 100)
        self._depth_sensor_clustering_range = rospy.get_param(
            "~depth_sensor_clustering_range", 1.0)

        self._max_perception_reach = rospy.get_param("~max_perception_reach",
                                                     100)
        self._max_classification_reach = rospy.get_param(
            "~max_classification_reach", 40)

        self._canny_threshold_min = rospy.get_param("~canny_threshold_min",
                                                    100)
        self._canny_threshold_max = rospy.get_param("~canny_threshold_max",
                                                    300)
        self._border_reduction_kernel_size = rospy.get_param(
            "~border_reduction_kernel_size", 7)

        self._map_frame_id = rospy.get_param("~map_frame_id", "cora/odom")

        self._next_known_object_id = 0

        # don't let anything uninitialized
        self._image_mask = None
        self._laser_detected_objects = None

        self._cv_bridge = CvBridge()
        self._camera_model = None
        self._known_objects = {}

        self._buoy_classifier = BuoyClassifier(
            self._buoy_color_samples_database_file)

        # publishers
        self._tagged_image_output_pub = rospy.Publisher(
            self._tagged_image_output_topic, Image, queue_size=10)
        self._marker_publisher = rospy.Publisher(self._buoy_markers_topic,
                                                 MarkerArray,
                                                 queue_size=10)
        self._detection_array_pub = rospy.Publisher(
            self._detection_array_topic, DetectionDataArray, queue_size=10)

        # subscribers
        self._image_raw_sub = rospy.Subscriber(
            self._camera_input_image_raw_topic,
            Image,
            self._image_raw_callback,
            queue_size=1)
        self._camera_info_sub = rospy.Subscriber(
            self._camera_input_camera_info_topic, CameraInfo,
            self._camera_info_callback)
        self._camera_info_sub = rospy.Subscriber(
            self._point_cloud_data_topic, PointCloud2,
            self._point_cloud_data_callback)

    def _get_laser_detections_in_camera_frame(self, camera_frame_id,
                                              image_timestamp):
        # Convert objects detected in the point cloud data to the camera frame and
        # pixel coordinates
        laser_detected_objects_in_camera_frame = []
        if not self._laser_detected_objects:
            return laser_detected_objects_in_camera_frame
        if not self._can_transform_between_frames(
                self._laser_detected_objects["frame_id"],
                self._laser_detected_objects["timestamp"], camera_frame_id,
                image_timestamp):
            return laser_detected_objects_in_camera_frame

        locations_in_world = [
            self._transform_location_between_frames(
                self._laser_detected_objects["frame_id"],
                self._laser_detected_objects["timestamp"], camera_frame_id,
                image_timestamp, detection_location) for detection_location in
            self._laser_detected_objects["detections"]
        ]
        laser_detected_objects_in_camera_frame = [{
            "location_in_world":
            loc,
            "location_in_image":
            self._camera_model.project3dToPixel((loc.x, loc.y, loc.z))
        } for loc in locations_in_world]
        return laser_detected_objects_in_camera_frame

    def _find_rectangle_center(self, rect):
        x, y, w, h = rect
        return (x + w / 2.0), (y + h / 2.0)

    def _is_within_rectangle(self, coord, rect):
        xt, yt = coord
        x1, y1, w, h = rect
        x2 = x1 + w
        y2 = y1 + h
        if ((xt >= x1) and (xt <= x2) and (yt >= y1) and (yt <= y2)):
            return True
        return False

    def _get_distance_between_points(self, coord_1, coord_2):
        x1, y1, = coord_1
        x2, y2, = coord_2
        return np.sqrt((x1 - x2)**2 + (y1 - y2)**2)

    def _match_laser_to_camera(self, laser_detection_data,
                               camera_detection_data, camera_frame_id,
                               image_timestamp):
        objects_found_data = []
        contours, rectangles, areas, rgb_colors, hsv_colors = camera_detection_data

        for laser_hit in laser_detection_data:
            loc_in_world = laser_hit["location_in_world"]
            loc_in_image = laser_hit["location_in_image"]
            # Don't look for laser hits behind the camera
            if loc_in_world.z < 0.0:
                continue

            # Look for the camera detection that's located closest to the point of impact
            # of the laser
            best_rgb = None
            best_hsv = None
            best_rect = None
            best_distance = 1e6  # should be larger than anything possible
            for rect, rgb, hsv in zip(rectangles, rgb_colors, hsv_colors):
                rect_center = self._find_rectangle_center(rect)
                distance_to_center = self._get_distance_between_points(
                    rect_center, loc_in_image)
                if self._is_within_rectangle(
                        loc_in_image,
                        rect) and (distance_to_center < best_distance):
                    best_rgb = rgb
                    best_hsv = hsv
                    best_rect = rect
                    best_distance = distance_to_center
            # if we found a match, create a record for it
            if best_rgb is not None and loc_in_world.z < self._max_perception_reach:
                object_width = best_rect[2] * \
                    loc_in_world.z / self._camera_model.fx()
                object_height = best_rect[3] * \
                    loc_in_world.z / self._camera_model.fy()
                hit_info = {}
                hit_info[
                    "location_in_world"] = self._transform_location_between_frames(
                        camera_frame_id, image_timestamp, self._map_frame_id,
                        image_timestamp, loc_in_world)
                hit_info["rectangle_in_image"] = best_rect
                hit_info["rgb_color"] = best_rgb
                hit_info["hsv_color"] = best_hsv
                hit_info["z_distance"] = loc_in_world.z
                hit_info["object_width"] = object_width
                hit_info["object_height"] = object_height
                hit_info[
                    "object_type"] = self._buoy_classifier.classify_buoy_sample_hsv(
                        (object_width, object_height), best_hsv)
                objects_found_data.append(hit_info)
        return objects_found_data

    def _check_if_object_is_already_known(self, new_entry):
        # walk through the known elements to see whether any of them matches at the same location
        def distance_between_map_locations(p1, p2):
            # Don't measure distance in z, since that's height
            distance = np.sqrt((p1.x - p2.x)**2 + (p1.y - p2.y)**2)
            return distance

        new_entry_location = new_entry["location_in_world"]
        for _, known_object in self._known_objects.items():
            known_entry_location = known_object.get_location()
            distance = distance_between_map_locations(new_entry_location,
                                                      known_entry_location)
            identity_distance = max(1.0, new_entry["object_width"])
            if (distance < identity_distance):
                if (new_entry["z_distance"] < self._max_classification_reach):
                    known_object.add_positive_observation(
                        new_entry["object_type"])
                return True
        return False

    def _insert_new_object_in_database(self, new_entry):
        next_index = self._next_known_object_id
        self._next_known_object_id += 1
        self._known_objects[next_index] = PhysicalBuoy(
            new_entry["location_in_world"])
        if new_entry["z_distance"] < self._max_classification_reach:
            self._known_objects[next_index].add_positive_observation(
                new_entry["object_type"])

    def _match_known_objects_to_image(self, camera_detection_data,
                                      camera_frame_id, image_timestamp):
        contours, rectangles, areas, rgb_colors, hsv_colors = camera_detection_data
        image_height, image_width = self._image_mask.shape
        for _, known_object in self._known_objects.items():
            p = self._transform_location_between_frames(
                self._map_frame_id, image_timestamp, camera_frame_id,
                image_timestamp, known_object.get_location())
            # is this object within visual range?
            if p.z <= 0 or p.z >= self._max_perception_reach:
                continue
            # if it were there, would it be visible in the image?
            u, v = self._camera_model.project3dToPixel((p.x, p.y, p.z))
            u, v = int(u), int(v)
            if (u < 0) or (u >= image_width) or (v < 0) or (
                    v >= image_height) or (self._image_mask[v, u] != 0):
                continue
            # it should be visible in the image, but is it?
            if not any([
                    self._is_within_rectangle((u, v), rect)
                    for rect in rectangles
            ]):
                known_object.add_negative_observation()

    def _remove_dead_objects(self):
        objects_to_remove = [
            index for index, known_object in self._known_objects.items()
            if known_object.is_dead()
        ]
        for key in objects_to_remove:
            classification = self._known_objects[
                key].get_most_likely_classification()
            loc_str = str(self._known_objects[key].get_location())
            if classification:
                category, prob = classification
                rospy.logwarn(
                    "Removing dead object of type {} ({}) at location {}".
                    format(category, prob, loc_str))
            else:
                rospy.logwarn(
                    "Removing unclassified dead object at location {}".format(
                        loc_str))
            self._known_objects.pop(key, None)

    def _publish_known_objects_info(self):
        if not len(self._known_objects):
            return
        rospy.loginfo("There are {} objects that have been located".format(
            len(self._known_objects)))

        detections_array_msg = DetectionDataArray()
        detections_array_msg.header.stamp = rospy.Time.now()
        detections_array_msg.header.frame_id = self._map_frame_id

        for det_id, known_object in self._known_objects.items():
            classification = known_object.get_most_likely_classification()

            detection_msg = DetectionData()
            detection_msg.id = det_id
            detection_msg.location = known_object.get_location()

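            # Collapse the multi-line Point string representation into a single
            # log-friendly line.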
            location_str = " ".join(str(known_object.get_location()).split())
            if classification:
                category, probability = classification

                detection_msg.classified = True
                detection_msg.probability = probability
                detection_msg.category = category

                rospy.loginfo(
                    "  - category: {:20s} - probability: {:5.2f} - location: {:30s}"
                    .format(category, probability, location_str))
            else:
                detection_msg.classified = False
                detection_msg.probability = 0.0
                detection_msg.category = ""

                rospy.loginfo(
                    "  - category: {:20s} - probability: {:5.2f} - location: {:30s}"
                    .format("UNKNOWN", 0.0, location_str))

            detections_array_msg.detections.append(detection_msg)
        self._detection_array_pub.publish(detections_array_msg)

    def _process_objects_known_and_new(self, objects_found_data,
                                       camera_detections_data, camera_frame_id,
                                       image_timestamp):
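        # Merge each new detection into the database of known objects, creating
        # a new entry when no existing object matches it.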
        for new_entry in objects_found_data:
            known = self._check_if_objecs_is_already_known(new_entry)
            if not known:
                self._insert_new_object_in_database(new_entry)
        self._match_known_objects_to_image(camera_detections_data,
                                           camera_frame_id, image_timestamp)
        self._remove_dead_objects()
        self._publish_known_objects_info()

    def _publish_markers(self):
        marray = MarkerArray()
        for index, known_object in self._known_objects.items():
            loc = known_object.get_location()
            m = Marker()
            m.header.stamp = rospy.Time.now()
            m.header.frame_id = self._map_frame_id
            m.ns = "tij"
            m.id = index
            m.type = Marker.CUBE
            m.action = Marker.MODIFY
            m.pose.position = loc
            m.pose.orientation.w = 1.0
            m.scale = Vector3(x=1, y=1, z=1)
            m.lifetime = rospy.Duration(1.0)
            m.frame_locked = True
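            # Unclassified objects are drawn in red, classified ones in green.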
            if known_object.get_most_likely_classification() is None:
                m.color = ColorRGBA(1.0, 0.0, 0.0, 1.0)
            else:
                m.color = ColorRGBA(0.0, 1.0, 0.0, 1.0)
            marray.markers.append(m)
        self._marker_publisher.publish(marray)

    def _tag_image_detections_on_output_image(self, output_cv_image,
                                              camera_detections_data):
        contours, rectangles, areas, rgb_colors, hsv_colors = camera_detections_data
        # Draw the bounding rectangle and contour of every camera detection
        for cnt, rect in zip(contours, rectangles):
            x, y, w, h = rect
            # mark the visual detection in the output image
            cv.rectangle(output_cv_image, (x, y), (x + w, y + h), (255, 0, 0),
                         1)
            cv.drawContours(output_cv_image, [cnt], -1, (0, 255, 0), 2)

    def _tag_laser_detections_on_output_image(self, output_cv_image,
                                              laser_detections_data):
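        # Keep only hits with a non-negative z coordinate, presumably to drop
        # points below the water surface.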
        locations_in_image = [
            item["location_in_image"] for item in laser_detections_data
            if item["location_in_world"].z >= 0
        ]
        for loc in locations_in_image:
            x, y = loc
            x, y = int(x), int(y)
            cv.circle(output_cv_image, (x, y), 5, (255, 255, 0), 2)

    def _tag_hit_objects_in_image(self, output_cv_image, objects_found_data):
        # Label each matched object with its detected type next to its bounding rectangle
        for detection in objects_found_data:
            x, y, w, h = detection["rectangle_in_image"]
            # mark the visual detection in the output image
            cv.putText(output_cv_image, detection["object_type"], (x + 5, y),
                       cv.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))

    def _image_raw_callback(self, image_msg):
        # Convert the image from ros msg to opencv domain
        input_cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg,
                                                       desired_encoding='rgb8')

        # If camera model is not ready, we can't do anything
        if self._camera_model is None:
            rospy.logwarn("Waiting for camera model information to arrive...")
            return
        # Generate the image mask only once.
        if self._image_mask is None:
            self._image_mask = self._create_view_for_image(input_cv_image)
        # Recover information from objects detected in the pointcloud
        laser_detections_data = self._get_laser_detections_in_camera_frame(
            image_msg.header.frame_id, image_msg.header.stamp)
        # process image to detect objects
        camera_detections_data = self._get_image_contours(
            input_cv_image,
            (self._canny_threshold_min, self._canny_threshold_max),
            self._border_reduction_kernel_size)
        # match laser hits to camera, and filter remaining data
        objects_found_data = self._match_laser_to_camera(
            laser_detections_data, camera_detections_data,
            image_msg.header.frame_id, image_msg.header.stamp)
        # process objects to determine which are new and which are not
        self._process_objects_known_and_new(objects_found_data,
                                            camera_detections_data,
                                            image_msg.header.frame_id,
                                            image_msg.header.stamp)
        # publish markers
        self._publish_markers()
        # create output image by tagging known information in it
        output_cv_image = input_cv_image.copy()
        # add information recovered through detection to the image
        self._tag_image_detections_on_output_image(output_cv_image,
                                                   camera_detections_data)
        self._tag_laser_detections_on_output_image(output_cv_image,
                                                   laser_detections_data)
        self._tag_hit_objects_in_image(output_cv_image, objects_found_data)
        # Convert the image from opencv domain to ros msg
        output_image = self._cv_bridge.cv2_to_imgmsg(output_cv_image,
                                                     encoding='rgb8')
        # Publish information
        self._tagged_image_output_pub.publish(output_image)

    def _camera_info_callback(self, camera_info_msg):
        if not self._camera_model:
            self._camera_model = PinholeCameraModel()
            self._camera_model.fromCameraInfo(camera_info_msg)
            rospy.loginfo("Got camera description message:")
            rospy.loginfo(" K = {0}, D = {1}".format(
                self._camera_model.intrinsicMatrix(),
                self._camera_model.distortionCoeffs()))

    def _point_cloud_data_callback(self, point_cloud_2_msg):
        # Convert the pointcloud2 data to a list of detected clusters
        # with coordinates, so that we deal with a smaller number of measurements
        detected_objects = self._clusterize_pointcloud_into_objects(
            point_cloud_2_msg)

        def np_vector_to_point(v):
            return Point(x=v[0], y=v[1], z=v[2])

        self._laser_detected_objects = {
            "frame_id": point_cloud_2_msg.header.frame_id,
            "timestamp": point_cloud_2_msg.header.stamp,
            "detections": [np_vector_to_point(v) for v in detected_objects]
        }

    def _clusterize_pointcloud_into_objects(self, point_cloud_2_msg):
        # Object count will be used to generate unique ids for clusters
        object_count = 0
        detected_objects = {}
        for hit in point_cloud2.read_points(point_cloud_2_msg, skip_nans=True):
            hit_pos = np.asarray(hit[0:3])
            # Reject measurements too close or too far
            laser_distance = np.sqrt(sum(hit_pos**2))
            if laser_distance < self._depth_sensor_min_distance or laser_distance > self._depth_sensor_max_distance:
                continue
            # Check if any of the previously known objects is within clustering distance of
            # the new detection. Also, for known objects, we average coordinates of multiple
            # hits within the cluster, to get a better idea of the center location.
            binned = False
            for object_id, obj_data in detected_objects.items():
                # accum_pos is the sum of all previous hits in this cluster, multiplicity
                # is the number of hits in the cluster
                accum_pos, multiplicity = obj_data
                distance = np.sqrt(
                    np.sum((accum_pos / multiplicity - hit_pos)**2))
                if (distance < self._depth_sensor_clustering_range):
                    # add this hit to the cluster
                    detected_objects[object_id] = (accum_pos + hit_pos,
                                                   multiplicity + 1)
                    binned = True
                    break
            # if we did not find a previous cluster near the hit, create a new cluster
            if not binned:
                detected_objects[object_count] = (hit_pos, 1)
                object_count += 1
        # average all the hits within each cluster
        objects = [
            accum_pos / multiplicity
            for accum_pos, multiplicity in detected_objects.values()
        ]
        return objects

    def _create_view_for_image(self, input_image):
        mask = np.zeros(input_image.shape[:2], np.uint8)
        height, width, _ = input_image.shape
        y1 = int(height * 0.25)
        y2 = int(height * 0.55)
        x1 = int(width * 0.2)
        x2 = int(width / 2.0)
        x3 = width - x1
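        # Nonzero mask pixels mark regions to ignore: a triangle at the bottom
        # centre (presumably the vessel's own hull) and a band across the top
        # of the frame (presumably sky/horizon).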
        pts = np.array([(x1, height), (x2, y2), (x3, height)], np.int32)
        pts = pts.reshape((-1, 1, 2))
        cv.fillPoly(mask, [pts], 255)
        cv.rectangle(mask, (0, 0), (width, y1), 255, -1)
        return mask

    def _get_image_contours(self,
                            input_image,
                            canny_args=(200, 300),
                            kernel_size=7):
        hsv_image = cv.cvtColor(input_image, cv.COLOR_RGB2HSV)
        edges = cv.Canny(input_image, canny_args[0], canny_args[1])
        blurred = cv.GaussianBlur(edges, (3, 9), 0)
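        # Note: the three-value return below assumes the OpenCV 3.x findContours
        # API; OpenCV 4.x returns only (contours, hierarchy).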
        _, contours, __ = cv.findContours(blurred, cv.RETR_EXTERNAL,
                                          cv.CHAIN_APPROX_SIMPLE)
        rectangles = [cv.boundingRect(cnt) for cnt in contours]
        areas = [cv.contourArea(cnt) for cnt in contours]
        rgb = []
        hsv = []
        dilate_kernel = np.ones((kernel_size, kernel_size), np.uint8)
        erode_kernel = np.ones((2 * kernel_size, 2 * kernel_size), np.uint8)
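        # Dilating by k and then eroding by 2k net-shrinks each contour mask, so
        # the mean colour is sampled from the contour interior, away from its edges.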
        for cnt in contours:
            mask = np.zeros(input_image.shape[:2], np.uint8)
            cv.drawContours(mask, [cnt], 0, 255, -1)
            dilated_mask = cv.dilate(mask, dilate_kernel, iterations=1)
            eroded_mask = cv.erode(dilated_mask, erode_kernel, iterations=1)
            rgb_color = cv.mean(input_image, mask=eroded_mask)
            hsv_color = cv.mean(hsv_image, mask=eroded_mask)
            rgb.append(rgb_color[:3])
            hsv.append(hsv_color[:3])

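        # A detection is masked out when its bounding-box centre falls on a
        # nonzero mask pixel (or when no mask has been generated yet).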
        def masked_out(x, y, w, h):
            xc, yc = self._find_rectangle_center((x, y, w, h))
            xc, yc = int(xc), int(yc)
            return (self._image_mask is None or self._image_mask[yc, xc] != 0)

        visibility_filter = [not masked_out(*rect) for rect in rectangles]

        fcontours = [
            contours[i] for i in range(len(contours)) if visibility_filter[i]
        ]
        frectangles = [
            rectangles[i] for i in range(len(contours)) if visibility_filter[i]
        ]
        fareas = [
            areas[i] for i in range(len(contours)) if visibility_filter[i]
        ]
        frgb = [rgb[i] for i in range(len(contours)) if visibility_filter[i]]
        fhsv = [hsv[i] for i in range(len(contours)) if visibility_filter[i]]

        return fcontours, frectangles, fareas, frgb, fhsv

    def _transform_location_between_frames(self, source_frame,
                                           source_timestamp, dest_frame,
                                           dest_timestamp, location):
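        # "cora/odom" acts as the fixed frame used to bridge the two timestamps
        # in the transform lookup.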
        transform = self._tf_buffer.lookup_transform_full(
            target_frame=dest_frame,
            target_time=dest_timestamp,
            source_frame=source_frame,
            source_time=source_timestamp,
            fixed_frame="cora/odom",
            timeout=rospy.Duration(1.0))

        original = PointStamped()
        original.header.frame_id = source_frame
        original.header.stamp = source_timestamp
        original.point = location
        transformed = tf2_geometry_msgs.do_transform_point(original, transform)
        return Point(x=transformed.point.x,
                     y=transformed.point.y,
                     z=transformed.point.z)

    def _can_transform_between_frames(self, source_frame, source_timestamp,
                                      dest_frame, dest_timestamp):
        return self._tf_buffer.can_transform_full(target_frame=dest_frame,
                                                  target_time=dest_timestamp,
                                                  source_frame=source_frame,
                                                  source_time=source_timestamp,
                                                  fixed_frame="cora/odom",
                                                  timeout=rospy.Duration(1.0))

    def run(self):
        rospy.spin()