Example no. 1
0
 def filter_for_edge_length(self, bba_in, lower_limit, upper_limit):
     """Filter detections by bounding-box edge length.

     Keeps only detections whose bounding-box x and y edges both lie
     strictly between lower_limit and upper_limit.

     Args:
         bba_in: DetectionArray whose .detections are filtered.
         lower_limit: exclusive lower bound for each edge length.
         upper_limit: exclusive upper bound for each edge length.

     Returns:
         A new DetectionArray containing only the passing detections.
     """
     if upper_limit < lower_limit:
         # Fix: rospy has no log_err(); the correct API is rospy.logerr().
         rospy.logerr("upper_limit is smaller than lower_limit")
         sys.exit()

     bba_out = DetectionArray()
     for bb in bba_in.detections:
         lwh = bb.bounding_box_lwh
         # Both edges must lie strictly inside (lower_limit, upper_limit).
         if lower_limit < lwh.x < upper_limit and lower_limit < lwh.y < upper_limit:
             bba_out.detections.append(bb)
     return bba_out
Example no. 2
0
 def filter_for_area_size(self, bba_in, lower_limit, upper_limit):
     """Filter detections by bounding-box area (x * y).

     Args:
         bba_in: DetectionArray whose .detections are filtered.
         lower_limit: exclusive lower bound for the area.
         upper_limit: exclusive upper bound for the area.

     Returns:
         A new DetectionArray containing only the passing detections.
     """
     if upper_limit < lower_limit:
         # Fix: rospy has no log_err(); the correct API is rospy.logerr().
         rospy.logerr("upper_limit is smaller than lower_limit")
         sys.exit()

     bba_out = DetectionArray()
     for bb in bba_in.detections:
         area = bb.bounding_box_lwh.x * bb.bounding_box_lwh.y
         # Keep boxes whose area lies strictly inside the limits.
         if lower_limit < area < upper_limit:
             bba_out.detections.append(bb)
     return bba_out
Example no. 3
0
 def filter_for_edge_relation(self, bba_in, lower_limit, upper_limit):
     """Filter detections by bounding-box aspect ratio.

     The ratio x/y is normalized to the (0, 1] range (shorter edge over
     longer edge) and must lie strictly between the limits.

     Args:
         bba_in: DetectionArray whose .detections are filtered.
         lower_limit: exclusive lower bound for the normalized ratio.
         upper_limit: exclusive upper bound for the normalized ratio.

     Returns:
         A new DetectionArray containing only the passing detections.
     """
     if upper_limit < lower_limit:
         # Fix: rospy has no log_err(); the correct API is rospy.logerr().
         rospy.logerr("upper_limit is smaller than lower_limit")
         sys.exit()
     bba_out = DetectionArray()
     for bb in bba_in.detections:
         lwh = bb.bounding_box_lwh
         # Fix: a degenerate box with y == 0 raised ZeroDivisionError;
         # its aspect ratio is undefined, so skip it.
         if lwh.y == 0:
             continue
         xy_relation = lwh.x / lwh.y
         # Normalize so the ratio is always <= 1 regardless of orientation.
         if xy_relation > 1:
             xy_relation = 1 / xy_relation
         if lower_limit < xy_relation < upper_limit:
             bba_out.detections.append(bb)
     return bba_out
    def get_tl_coords_in_image(self, coords_in_world):
        """Project (X, Y, Z) world coords to (x, y) camera pixel coords.

        See https://github.com/udacity/CarND-Capstone/issues/24

        Args:
            coords_in_world: TrafficLight coordinates (has .x, .y, .z).

        Returns:
            (camera_x, camera_y) pixel coordinates, or None when the
            /world -> /base_link transform could not be looked up.
        """

        self.listener.waitForTransform("/world", "/base_link", rospy.Time(),
                                       rospy.Duration(1.0))
        try:
            now = rospy.Time.now()
            self.listener.waitForTransform("/world", "/base_link", now,
                                           rospy.Duration(1.0))
            (trans,
             rot) = self.listener.lookupTransform("/world", "/base_link", now)
        except (tf.Exception, tf.LookupException, tf.ConnectivityException):
            # Fix: rospy has no log_err() (correct API is rospy.logerr()),
            # and falling through here left trans/rot undefined, causing a
            # NameError below. Bail out explicitly instead.
            rospy.logerr("Couldn't find camera to map transform.")
            return None

        # 3D rotation (yaw only) and translation of the light coords from
        # the world frame into the car frame.
        # NOTE(review): the transform is looked up as /world -> /base_link;
        # confirm the rotation/translation direction matches the intent.
        x_world = coords_in_world.x
        y_world = coords_in_world.y
        z_world = coords_in_world.z
        e = tf.transformations.euler_from_quaternion(rot)
        cos_yaw = math.cos(e[2])
        sin_yaw = math.sin(e[2])
        x_car = x_world * cos_yaw - y_world * sin_yaw + trans[0]
        y_car = x_world * sin_yaw + y_world * cos_yaw + trans[1]
        z_car = z_world + trans[2]

        # Use the camera projection matrix P to map car-frame coords to
        # homogeneous pixel coords, then dehomogenize.
        # http://docs.ros.org/melodic/api/sensor_msgs/html/msg/CameraInfo.html
        uvw = np.dot(self.camera_info.P, [x_car, y_car, z_car, 1])
        camera_x = uvw[0] / uvw[2]
        camera_y = uvw[1] / uvw[2]

        return (camera_x, camera_y)