class PredictionSummarizer(object):
    """Node that decides whether to ask a question from uncertainty metrics."""
    def __init__(self):
        """Init."""
        super(PredictionSummarizer, self).__init__()
        rospy.on_shutdown(self.shutdown)

        self.pub = rospy.Publisher('prediction_summary', PredictionSummary,
                                   queue_size=10)  # rospy warns if queue_size is omitted
        self.tss = TimeSynchronizer(subscribers, queue_size=3)
        # self.tss = \
        #     ApproximateTimeSynchronizer(subscribers, queue_size=3, slop=5)

        self.tss.registerCallback(self.callback)

    def callback(self, prediction, entropy, margin, all_predictions,
                 named_predictions):
        """Produce a summary msg and publish it."""
        summary = PredictionSummary(predicted=prediction,
                                    entropy=entropy,
                                    margin=margin,
                                    all_predictions=all_predictions,
                                    named_predictions=named_predictions)
        self.pub.publish(summary)

    def shutdown(self):
        """Hook to be executed when rospy.shutdown is called."""
        pass

    def run(self):
        """Run the node."""
        rospy.spin()
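
Note: the subscribers list handed to the TimeSynchronizer above is not part of this excerpt. A minimal sketch of what it could look like, with one message_filters.Subscriber per callback argument (the topic names and the stamped message types from my_pkg are hypothetical; TimeSynchronizer needs a header.stamp on every input):

import message_filters
from my_pkg.msg import StampedPrediction, StampedFloat  # hypothetical types

subscribers = [
    message_filters.Subscriber('prediction', StampedPrediction),
    message_filters.Subscriber('entropy', StampedFloat),
    message_filters.Subscriber('margin', StampedFloat),
    message_filters.Subscriber('all_predictions', StampedPrediction),
    message_filters.Subscriber('named_predictions', StampedPrediction),
]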
Example #2
 def __init__(self, name):
     rospy.loginfo("Starting %s..." % name)
     self.vis_pub = rospy.Publisher("~visualization_marker",
                                    Marker,
                                    queue_size=1)
     self.tf = TransformListener()
     self.cc = CostmapCreator(
         rospy.Publisher("/velocity_costmap",
                         OccupancyGrid,
                         queue_size=10,
                         latch=True),
         rospy.Publisher("~origin", PoseStamped, queue_size=10))
     self.dyn_srv = DynServer(VelocityCostmapsConfig, self.dyn_callback)
     subs = [
         Subscriber(
             rospy.get_param("~qtc_topic",
                             "/qtc_state_predictor/prediction_array"),
             QTCPredictionArray),
         Subscriber(
             rospy.get_param("~ppl_topic", "/people_tracker/positions"),
             PeopleTracker)
     ]
     ts = TimeSynchronizer(fs=subs, queue_size=60)
     ts.registerCallback(self.callback)
     rospy.loginfo("... all done.")
Example #3
    def test_synchronizer(self):
        m0 = MockFilter()
        m1 = MockFilter()
        ts = TimeSynchronizer([m0, m1], 1)
        ts.registerCallback(self.cb_collector_2msg)

        if 0:
            # Simple case, pairs of messages, make sure that they get combined
            for t in range(10):
                self.collector = []
                msg0 = MockMessage(t, 33)
                msg1 = MockMessage(t, 34)
                m0.signalMessage(msg0)
                self.assertEqual(self.collector, [])
                m1.signalMessage(msg1)
                self.assertEqual(self.collector, [(msg0, msg1)])

        # Scramble sequences of length N. Make sure that TimeSynchronizer recombines them.
        random.seed(0)
        for N in range(1, 10):
            m0 = MockFilter()
            m1 = MockFilter()
            seq0 = [MockMessage(t, random.random()) for t in range(N)]
            seq1 = [MockMessage(t, random.random()) for t in range(N)]
            # random.shuffle(seq0)
            ts = TimeSynchronizer([m0, m1], N)
            ts.registerCallback(self.cb_collector_2msg)
            self.collector = []
            for msg in random.sample(seq0, N):
                m0.signalMessage(msg)
            self.assertEqual(self.collector, [])
            for msg in random.sample(seq1, N):
                m1.signalMessage(msg)
            self.assertEqual(set(self.collector), set(zip(seq0, seq1)))
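
MockFilter and MockMessage are not shown in this listing. In the ros_comm test suite they are thin shims roughly like the sketch below: a MockMessage only needs a header.stamp for the synchronizer to key on, and a MockFilter simply inherits signalMessage from SimpleFilter:

from message_filters import SimpleFilter

class MockHeader(object):
    pass

class MockMessage(object):
    def __init__(self, stamp, data):
        self.header = MockHeader()
        self.header.stamp = stamp
        self.data = data

class MockFilter(SimpleFilter):
    pass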
Example #5
    def __init__(self):
        """Constructor."""
        # Node initialization.
        rospy.init_node("scan_filter", anonymous=True)
        rospy.sleep(1)

        # Parameter reading.
        self.map_inflation_radius = float(
            rospy.get_param('map_inflation_radius'))
        self.robot_count = rospy.get_param('/robot_count')
        # Robot ID starting from 1, as in navigation_2d.
        self.robot_id = rospy.get_param('~robot_id')
        rospy.logerr("Received Robot ID: {} Robot Count: {}".format(
            self.robot_id, self.robot_count))
        # Initialization of variables.
        self.all_poses = {
        }  # Poses of other robots in the world reference frame.

        # Get pose from other robots.
        for i in range(self.robot_count):
            if i != self.robot_id:
                s = "def a_{0}(self, data): self.all_poses[{0}] = (data.pose.pose.position.x," \
                    "data.pose.pose.position.y," \
                    "(data.pose.pose.orientation.x,data.pose.pose.orientation.y,data.pose.pose.orientation.z," \
                    "data.pose.pose.orientation.w), data.header.stamp.to_sec())".format(i)
                exec(s)
                exec("setattr(ScanFilter, 'callback_pos_teammate{0}', a_{0})".
                     format(i))
                exec(
                    "rospy.Subscriber('/robot_{0}/base_pose_ground_truth', Odometry, self.callback_pos_teammate{0}, "
                    "queue_size=1)".format(i))

        # Assumes that, apart from the readings and timestamp, the rest won't change.
        self.scan = rospy.wait_for_message('base_scan', LaserScan)
        self.scan.ranges = list(self.scan.ranges)
        self.scan_ranges_size = len(self.scan.ranges)

        # Subscriber of the own robot.
        self.base_pose_subscriber = message_filters.Subscriber(
            'base_pose_ground_truth', Odometry)
        self.scan_subscriber = message_filters.Subscriber(
            'base_scan', LaserScan)
        ats = TimeSynchronizer(
            [self.base_pose_subscriber, self.scan_subscriber], 1)  # (0.01 slop if ApproximateTimeSynchronizer)
        ats.registerCallback(self.pose_scan_callback)

        # Publisher of the filtered scan.
        self.scan_pub = rospy.Publisher('filtered_scan',
                                        LaserScan,
                                        queue_size=1)
        # Creation of the message.

        rospy.Subscriber('/shutdown', String, self.save_all_data)
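
The exec()-generated callbacks above can be written as ordinary closures, which avoids building code from strings and is easier to debug. A sketch of the same per-teammate subscription (make_pose_callback is a name introduced here, not part of the original):

def make_pose_callback(self, robot_id):
    def callback(data):
        p = data.pose.pose
        self.all_poses[robot_id] = (p.position.x, p.position.y,
                                    (p.orientation.x, p.orientation.y,
                                     p.orientation.z, p.orientation.w),
                                    data.header.stamp.to_sec())
    return callback

# Inside __init__:
for i in range(self.robot_count):
    if i != self.robot_id:
        rospy.Subscriber('/robot_{}/base_pose_ground_truth'.format(i),
                         Odometry, make_pose_callback(self, i), queue_size=1)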
Example #6
class CameraSynchronizer:
    def __init__(self, camera_name):
        rospy.logerr("Initializing!")
        self.camera_name = camera_name

        if (self.camera_name == "right_hand"):
            self.image_sub = Subscriber("/cameras/right_hand_camera/image",
                                        Image)
            self.info_sub = Subscriber(
                "/cameras/right_hand_camera/camera_info_std", CameraInfo)

        elif (self.camera_name == "top"):
            rospy.loginfo("Initializing top camera!")
            self.image_sub = Subscriber("/camera/rgb/image_raw", Image)
            self.info_sub = Subscriber("/camera/rgb/camera_info", CameraInfo)

    def subscribe(self):
        # self.ats = ApproximateTimeSynchronizer(
        #    [self.image_sub, self.info_sub], queue_size=5, slop=0.5)
        # self.ats.registerCallback(self.gotImage)

        self.ts = TimeSynchronizer(
            [self.image_sub, self.info_sub],
            queue_size=5,
        )
        self.ts.registerCallback(self.gotImage)

    def publish(self):
        self.image_pub = rospy.Publisher("/" + self.camera_name +
                                         "/synch_camera/image_rect",
                                         Image,
                                         queue_size=10)
        self.info_pub = rospy.Publisher("/" + self.camera_name +
                                        "/synch_camera/camera_info",
                                        CameraInfo,
                                        queue_size=10)

    def gotImage(self, image, camerainfo):
        rospy.loginfo("Got an image!")
        print("GOT AN IMAGE")
        assert image.header.stamp == camerainfo.header.stamp
        rospy.loginfo("got an Image and CameraInfo")

        self.image_pub.publish(image)
        self.info_pub.publish(camerainfo)
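
Note that gotImage publishes through self.image_pub and self.info_pub, which are only created in publish(), so publish() has to run before subscribe() wires up the synchronized callback. A hedged usage sketch:

if __name__ == '__main__':
    rospy.init_node('camera_synchronizer')  # hypothetical node name
    cs = CameraSynchronizer('top')
    cs.publish()    # create the publishers first
    cs.subscribe()  # then register the synchronized callback
    rospy.spin()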
Example #7
class teleop_and_stop:
    def __init__(self):
        #self.move_sub = Subscriber()
        self.scan_sub = Subscriber('/scan', LaserScan)
        #self.scan_sub.registerCallback(self.test_scan_callback)
        self.move_pub = rospy.Publisher('/cmd_vel_mux/input/teleop',
                                        Twist,
                                        queue_size=10)
        self.teleop_sub = rospy.Subscriber('/my_teleop', Twist, self.add_stamp)
        self.altered_input_pub = rospy.Publisher('/my_altered_input',
                                                 TwistStamped,
                                                 queue_size=10)
        self.altered_input_sub = Subscriber('/my_altered_input', TwistStamped)
        #self.altered_input_sub.registerCallback(self.altered_input_callback)
        self.ats = TimeSynchronizer([self.altered_input_sub, self.scan_sub],
                                    queue_size=10)
        self.ats.registerCallback(self.scan_callback)

    def add_stamp(self, input):
        output = TwistStamped()
        output.twist = input
        output.header = Header()
        output.header.stamp = rospy.Time.now()
        self.altered_input_pub.publish(output)
        # print("at line 33")

    def altered_input_callback(self, input):
        print("got altered input")

    def test_scan_callback(self, scan):
        print("got a scan")

    def scan_callback(self, vel, scan):  #For some reason it never gets here!
        near_wall = False
        for i in scan.ranges:
            if i <= 0.5 and vel.twist.linear.x >= 0:
                #print True
                self.move_pub.publish(Twist())
                near_wall = True
            elif (not near_wall):
                self.move_pub.publish(vel.twist)
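
A likely explanation for the "it never gets here" comment: TimeSynchronizer matches only messages whose header stamps are exactly equal, and the TwistStamped stamped with rospy.Time.now() in add_stamp will virtually never coincide with the laser driver's stamps. Replacing the synchronizer in __init__ with an approximate one (the 0.1 s slop is an assumption) should make scan_callback fire:

from message_filters import ApproximateTimeSynchronizer

self.ats = ApproximateTimeSynchronizer(
    [self.altered_input_sub, self.scan_sub], queue_size=10, slop=0.1)
self.ats.registerCallback(self.scan_callback)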
Example #8
if __name__ == '__main__':
    rospy.init_node('schedule_status_printer')

    all_topics = rospy.get_published_topics()
    all_tasks = 'task_executor/all_tasks'
    current_schedule = 'current_schedule'

    use_all_tasks = False
    for topic, msg_type in all_topics:
        if topic.endswith(all_tasks):
            use_all_tasks = True
            break

    if use_all_tasks:

        rospy.Subscriber('mdp_plan_exec/execute_policy/goal',
                         ExecutePolicyActionGoal, policy_goal_callback)
        rospy.Subscriber('mdp_plan_exec/execute_policy/feedback',
                         ExecutePolicyActionFeedback, policy_feedback_callback)

        from message_filters import Subscriber, TimeSynchronizer
        ts = TimeSynchronizer([
            Subscriber(all_tasks, ExecutionStatus),
            Subscriber(current_schedule, ExecutionStatus)
        ], 1)
        ts.registerCallback(all_tasks_cb)
    else:
        rospy.Subscriber('current_schedule', ExecutionStatus, callback)

    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()
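
The callbacks referenced above are defined elsewhere in the script. For the synchronized pair, the callback receives one message per subscriber, in subscription order; a hypothetical sketch, assuming ExecutionStatus carries an execution_queue of tasks:

def all_tasks_cb(all_tasks_msg, current_schedule_msg):
    rospy.loginfo("%d tasks known, %d currently scheduled",
                  len(all_tasks_msg.execution_queue),
                  len(current_schedule_msg.execution_queue))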
Example #9
class MappingNode(Mapping):
    def __init__(self):
        super(MappingNode, self).__init__()

        self.lock = threading.RLock()
        self.use_slam_traj = True

    def init_node(self, ns="~"):
        self.use_slam_traj = rospy.get_param(ns + "use_slam_traj", True)

        self.x0, self.y0 = rospy.get_param(ns + "origin")
        self.width, self.height = rospy.get_param(ns + "size")
        self.resolution = rospy.get_param(ns + "resolution")
        self.inc = rospy.get_param(ns + "inc")

        self.pub_occupancy1 = rospy.get_param(ns + "pub_occupancy1")
        self.hit_prob = rospy.get_param(ns + "hit_prob")
        self.miss_prob = rospy.get_param(ns + "miss_prob")
        self.inflation_angle = rospy.get_param(ns + "inflation_angle")
        self.inflation_radius = rospy.get_param(ns + "inflation_range")

        self.pub_occupancy2 = rospy.get_param(ns + "pub_occupancy2")
        self.inflation_radius = rospy.get_param(ns + "inflation_radius")
        self.outlier_filter_radius = rospy.get_param(ns +
                                                     "outlier_filter_radius")
        self.outlier_filter_min_points = rospy.get_param(
            ns + "outlier_filter_min_points")

        self.pub_intensity = rospy.get_param(ns + "pub_intensity")

        # Only update keyframe that has significant movement
        self.min_translation = rospy.get_param(ns + "min_translation")
        self.min_rotation = rospy.get_param(ns + "min_rotation")

        self.sonar_sub = Subscriber(SONAR_TOPIC, OculusPing)
        if self.use_slam_traj:
            self.traj_sub = Subscriber(SLAM_TRAJ_TOPIC, PointCloud2)
        else:
            self.traj_sub = Subscriber(LOCALIZATION_TRAJ_TOPIC, PointCloud2)
        # Method 1
        if self.pub_occupancy1:
            self.feature_sub = Subscriber(SONAR_FEATURE_TOPIC, PointCloud2)
        # Method 2
        if self.pub_occupancy2:
            self.feature_sub = Subscriber(SLAM_CLOUD_TOPIC, PointCloud2)

        # The time stamps for trajectory and ping have to be exactly the same.
        # A big queue_size is required to ensure no keyframe is missed,
        # especially during offline playback.
        self.ts = TimeSynchronizer(
            [self.traj_sub, self.sonar_sub, self.feature_sub], 100)
        self.ts.registerCallback(self.tpf_callback)

        self.intensity_map_pub = rospy.Publisher(MAPPING_INTENSITY_TOPIC,
                                                 OccupancyGrid,
                                                 queue_size=1,
                                                 latch=True)
        self.occupancy_map_pub = rospy.Publisher(MAPPING_OCCUPANCY_TOPIC,
                                                 OccupancyGrid,
                                                 queue_size=1,
                                                 latch=True)

        self.get_map_srv = rospy.Service(ns + "get_map", GetOccupancyMap,
                                         self.get_map)

        self.configure()
        loginfo("Mapping node is initialized")

    def get_map(self, req):
        resp = GetOccupancyMapResponse()
        with self.lock:
            occ_msg = self.get_occupancy_grid(req.frames, req.resolution)
            resp.occ = occ_msg

        return resp

    @add_lock
    def tpf_callback(self, traj_msg, ping, feature_msg):
        self.lock.acquire()
        with CodeTimer("Mapping - add keyframe"):
            traj = r2n(traj_msg)
            pose = pose322(n2g(traj[-1, :6], "Pose3"))
            points = r2n(feature_msg)
            self.add_keyframe(len(traj) - 1, pose, ping, points)

        with CodeTimer("Mapping - update keyframe"):
            for x in range(len(traj) - 1):
                pose = pose322(n2g(traj[x, :6], "Pose3"))
                self.update_pose(x, pose)

        if self.pub_intensity:
            with CodeTimer("Mapping - publish intensity map"):
                intensity_msg = self.get_intensity_grid()
                intensity_msg.header.stamp = ping.header.stamp
                if not self.use_slam_traj:
                    intensity_msg.header.frame_id = "odom"
                self.intensity_map_pub.publish(intensity_msg)

        if self.pub_occupancy1:
            with CodeTimer("Mapping - publish occupancy map"):
                occupancy_msg = self.get_occupancy_grid1()
                occupancy_msg.header.stamp = ping.header.stamp
                if not self.use_slam_traj:
                    occupancy_msg.header.frame_id = "odom"
                self.occupancy_map_pub.publish(occupancy_msg)

        if self.pub_occupancy2:
            with CodeTimer("Mapping - publish occupancy map"):
                occupancy_msg = self.get_occupancy_grid2()
                occupancy_msg.header.stamp = ping.header.stamp
                if not self.use_slam_traj:
                    occupancy_msg.header.frame_id = "odom"
                self.occupancy_map_pub.publish(occupancy_msg)

        # # Why doesn't this work?
        # # Delete unnecessary ping cached in time synchronizer since we use a big queue.
        # q = self.ts.queues[1]
        # self.ts.queues[1] = {
        #     t: m for t, m in q.items() if t >= ping.header.stamp
        # }
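        # # A plausible answer: message_filters registers each input callback
        # # with a direct reference to its queue dict, so rebinding
        # # self.ts.queues[1] leaves the synchronizer writing to the old dict.
        # # Mutating the dict in place (an assumption about the rospy
        # # implementation) should work instead:
        # q = self.ts.queues[1]
        # for t in [t for t in q if t < ping.header.stamp]:
        #     del q[t]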
        self.lock.release()
        if self.save_fig:
            self.save_submaps()

    def save_submaps(self):
        submaps = []
        for keyframe in self.keyframes:
            submap = (
                g2n(keyframe.pose),
                keyframe.r,
                keyframe.c,
                keyframe.l,
                keyframe.i,
                keyframe.cimg,
                keyframe.limg,
            )
            submaps.append(submap)
        np.savez(
            "step-{}-submaps.npz".format(len(self.keyframes) - 1),
            submaps=submaps,
            map_size=(self.x0, self.y0, self.width, self.height,
                      self.resolution),
        )
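
Because the submap tuples mix arrays of different shapes, numpy stores them as an object array that must be unpickled when read back. A loading sketch (the file name follows the pattern above):

data = np.load("step-9-submaps.npz", allow_pickle=True)
submaps = data["submaps"]
map_size = data["map_size"]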
Example #10
    def __init__(self, args, n):
        # Auxiliary variables

        self.init_scene = False
        self.use_gaussian_noise = False
        self.filter_hdmap = True

        self.display = args[0]
        self.trajectory_prediction = args[1]
        self.ros = args[2]
        self.grid = False

        self.image_width = 0  # This value will be updated
        self.image_height = 1000  # Pixels

        self.shapes = []
        self.view_ahead = 0

        # The bounding boxes will be predicted n steps ahead, according to
        # their velocity.
        self.n = np.zeros((n, 1))

        self.ego_vehicle_x, self.ego_vehicle_y = 0.0, 0.0
        self.ego_orientation_cumulative_diff = 0  # Cumulative orientation w.r.t the original odometry of the ego-vehicle (radians)
        self.initial_angle = False
        self.previous_angle = float(0)
        self.pre_yaw = float(0)
        self.current_yaw = float(0)
        self.ego_trajectory_prediction_bb = np.zeros(
            (5, self.n.shape[0]))  # (x,y,s,r,theta) n times

        self.write_video = False
        self.video_flag = False
        self.start = float(0)
        self.end = float(0)

        self.frame_no = 0
        self.avg_fps = float(0)

        self.colours = np.random.rand(32, 3)

        # Emergency brake

        self.cont = 0
        self.collision_flag = std_msgs.msg.Bool()
        self.collision_flag.data = False
        self.emergency_break_timer = float(0)
        self.nearest_object_in_route = 50000

        self.ego_braking_distance = 0

        self.rc_max = rospy.get_param("/controller/rc_max")
        self.geometric_monitorized_area = []

        # ROS publishers

        self.pub_monitorized_area = rospy.Publisher(
            "/t4ac/perception/detection/monitorized_area_marker",
            visualization_msgs.msg.Marker,
            queue_size=20)
        self.pub_bev_sort_tracking_markers_list = rospy.Publisher(
            '/t4ac/perception/tracking/obstacles_markers',
            visualization_msgs.msg.MarkerArray,
            queue_size=20)
        self.pub_particular_monitorized_area_markers_list = rospy.Publisher(
            '/t4ac/perception/monitors/individual_monitorized_area',
            visualization_msgs.msg.MarkerArray,
            queue_size=20)
        self.pub_collision = rospy.Publisher(
            '/t4ac/perception/monitors/predicted_collision',
            std_msgs.msg.Bool,
            queue_size=20)
        self.pub_nearest_object_distance = rospy.Publisher(
            '/t4ac/perception/monitors/nearest_object_distance',
            std_msgs.msg.Float64,
            queue_size=20)

        # ROS subscribers

        if not self.filter_hdmap:
            self.sub_road_curvature = rospy.Subscriber(
                "/control/rc", std_msgs.msg.Float64,
                self.road_curvature_callback)
        self.detections_topic = "/t4ac/perception/detection/merged_obstacles"
        self.odom_topic = "/localization/pose"
        self.monitorized_lanes_topic = "/mapping_planning/monitor/lanes"

        self.detections_subscriber = Subscriber(self.detections_topic,
                                                BEV_detections_list)
        self.odom_subscriber = Subscriber(self.odom_topic,
                                          nav_msgs.msg.Odometry)
        self.monitorized_lanes_subscriber = Subscriber(
            self.monitorized_lanes_topic, MonitorizedLanes)

        ts = TimeSynchronizer([
            self.detections_subscriber, self.odom_subscriber,
            self.monitorized_lanes_subscriber
        ], header_synchro)
        ts.registerCallback(self.callback)

        # Listeners

        self.listener = tf.TransformListener()
Example #11
    def callback(self, detections_rosmsg, odom_rosmsg,
                 monitorized_lanes_rosmsg):
        """
        """

        try:  # Look up the map -> LiDAR transform
            (translation, quaternion) = self.listener.lookupTransform(
                '/map', '/ego_vehicle/lidar/lidar1', rospy.Time(0))
            # rospy.Time(0) get us the latest available transform
            rot_matrix = tf.transformations.quaternion_matrix(quaternion)

            self.tf_map2lidar = rot_matrix
            # This matrix transforms local into global coordinates.
            self.tf_map2lidar[:3, 3] = self.tf_map2lidar[:3, 3] + translation

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            print("\033[1;33m" + "TF exception" + '\033[0;m')

        self.start = time.time()

        self.detections_subscriber.unregister()
        self.odom_subscriber.unregister()
        self.monitorized_lanes_subscriber.unregister()

        # Initialize the scene

        if not self.init_scene:
            self.real_height = detections_rosmsg.front - detections_rosmsg.back  # Grid height (m)
            self.real_front = detections_rosmsg.front + self.view_ahead  # To study obstacles (view_head) meters ahead
            self.real_width = -detections_rosmsg.left + detections_rosmsg.right  # Grid width (m)
            self.real_left = -detections_rosmsg.left

            r = float(self.image_height) / self.real_height

            self.image_width = int(round(self.real_width *
                                         r))  # Grid width (pixels)
            self.image_front = int(round(self.real_front * r))
            self.image_left = int(round(self.real_left * r))

            print("Real width: ", self.real_width)
            print("Real height: ", self.real_height)
            print("Image width: ", self.image_width)
            print("Image height: ", self.image_height)

            self.shapes = (self.real_front, self.real_left, self.image_front,
                           self.image_left)
            self.scale_factor = (self.image_height / self.real_height,
                                 self.image_width / self.real_width)

            # Our centroid is defined in BEV camera coordinates but centered on the ego-vehicle.
            # We need to translate it to the top-left corner (required by the SORT algorithm).

            self.tf_bevtl2bevcenter_m = np.array(
                [[1.0, 0.0, 0.0, self.shapes[1]],
                 [0.0, 1.0, 0.0, self.shapes[0]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # From BEV centered to BEV top-left (in pixels)

            self.tf_bevtl2bevcenter_px = np.array(
                [[1.0, 0.0, 0.0, -self.shapes[3]],
                 [0.0, 1.0, 0.0, -self.shapes[2]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # Rotation matrix from LiDAR frame to BEV frame (to convert to global coordinates)

            self.tf_lidar2bev = np.array([[0.0, -1.0, 0.0, 0.0],
                                          [-1.0, 0.0, 0.0, 0.0],
                                          [0.0, 0.0, -1.0, 0.0],
                                          [0.0, 0.0, 0.0, 1.0]])

            # Initialize the regression model to calculate the braking distance

            self.velocity_braking_distance_model = monitors_functions.fit_velocity_braking_distance_model(
            )

            # Tracker

            self.mot_tracker = sort_functions.Sort(max_age, min_hits, n,
                                                   self.shapes,
                                                   self.trajectory_prediction,
                                                   self.filter_hdmap)

            self.init_scene = True

        output_image = np.ones(
            (self.image_height, self.image_width, 3),
            dtype="uint8")  # Black image to represent the scene

        print("----------------")

        # Initialize ROS and monitors variables

        self.trackers_marker_list = visualization_msgs.msg.MarkerArray(
        )  # Initialize trackers list
        monitorized_area_colours = []

        trackers_in_route = 0
        nearest_distance = std_msgs.msg.Float64()
        nearest_distance.data = float(self.nearest_object_in_route)

        world_features = []
        trackers = []
        dynamic_trackers = []
        static_trackers = []
        ndt = 0

        timer_rosmsg = detections_rosmsg.header.stamp.to_sec()

        # Predict the ego-vehicle trajectory

        monitors_functions.ego_vehicle_prediction(self, odom_rosmsg,
                                                  output_image)

        print("Braking distance ego vehicle: ",
              float(self.ego_braking_distance))

        # Convert input data to bboxes to perform Multi-Object Tracking

        #print("\nNumer of total detections: ", len(detections_rosmsg.bev_detections_list))
        bboxes_features, types = sort_functions.bbox_to_xywh_cls_conf(
            self, detections_rosmsg, odom_rosmsg, detection_threshold,
            output_image)
        #print("Number of relevant detections: ", len(bboxes_features)) # score > detection_threshold

        # Emergency brake (detection only)
        """
        nearest_object_in_route = 99999

        if not self.collision_flag.data:
            for bbox,type_object in zip(bboxes_features,types):
                for lane in monitorized_lanes_rosmsg.lanes: 
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        detection = Node()

                        bbox = bbox.reshape(1,-1)
                        global_pos = sort_functions.store_global_coordinates(self.tf_map2lidar,bbox)
                        detection.x = global_pos[0,0]
                        detection.y = -global_pos[1,0] # N.B. In CARLA this coordinate is the opposite

                        is_inside, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(lane,detection,type_object)

                        if is_inside:
                            #distance_to_object = monitors_functions.calculate_distance_to_nearest_object_inside_route(monitorized_lanes_rosmsg,global_pos)
                            
                            
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            distance_to_object = math.sqrt(pow(ego_x_global-detection.x,2)+pow(ego_y_global-detection.y,2))
                            
                            
                            
                            
                            
                            
                            print("Distance to object: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                self.nearest_object_in_route = distance_to_object
                                nearest_distance.data = float(self.nearest_object_in_route)

                            print("Braking distance: ", self.ego_braking_distance)
                            
                            if distance_to_object < self.ego_braking_distance:
                                print("Break")
                                self.collision_flag.data = True
                                self.pub_collision.publish(self.collision_flag)
                                self.emergency_break_timer = time.time()
                                self.cont = 0
                                break
                else: 
                    continue 
                break
        else:
            dist = 99999
            for bbox,type_object in zip(bboxes_features,types):
                for lane in monitorized_lanes_rosmsg.lanes: 
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        detection = Node()

                        bbox = bbox.reshape(1,-1)
                        global_pos = sort_functions.store_global_coordinates(self.tf_map2lidar,bbox)
                        detection.x = global_pos[0,0]
                        detection.y = -global_pos[1,0] # N.B. In CARLA this coordinate is the opposite

                        is_inside, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(lane,detection,type_object)
                        print("inside: ", is_inside)
                        if is_inside:
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            aux = math.sqrt(pow(ego_x_global-detection.x,2)+pow(ego_y_global-detection.y,2))

                            if aux < dist:
                                dist = aux
                        break

            if dist > 5:
                self.cont += 1
            else:
                self.cont = 0
            print("distance to object after collision: ", dist)
            print("cont: ", self.cont)

        if self.cont >= 3:
            self.collision_flag.data = False
            self.nearest_object_in_route = 50000
            nearest_distance.data = float(self.nearest_object_in_route)

        print("Data: ", nearest_distance.data)
        print("Collision: ", self.collision_flag.data)

        self.pub_nearest_object_distance.publish(nearest_distance)
        self.pub_collision.publish(self.collision_flag)
        """
        ## Multi-Object Tracking

        # TODO: Publish on tracked_obstacle message instead of visualization marker
        # TODO: Evaluate the predicted position to predict its influence in a certain use case

        if (len(bboxes_features) > 0):  # At least one object was detected
            trackers, object_types, object_scores, object_observation_angles, dynamic_trackers, static_trackers = self.mot_tracker.update(
                bboxes_features, types, self.ego_vel_px, self.tf_map2lidar,
                self.shapes, self.scale_factor, monitorized_lanes_rosmsg,
                timer_rosmsg, self.angle_bb, self.geometric_monitorized_area)

            #print("Number of trackers: ", len(trackers))
            if len(dynamic_trackers.shape) == 3:
                #print("Dynamic trackers", dynamic_trackers.shape[1])
                ndt = dynamic_trackers.shape[1]
            else:
                #print("Dynamic trackers: ", 0)
                ndt = 0
            #print("Static trackers: ", static_trackers.shape[0])

            id_nearest = -1

            if (len(trackers) > 0):  # At least one object was tracked
                for i, tracker in enumerate(trackers):
                    object_type = object_types[i]
                    object_score = object_scores[i]
                    object_rotation = np.float64(object_observation_angles[i,
                                                                           0])
                    object_observation_angle = np.float64(
                        object_observation_angles[i, 1])

                    color = self.colours[tracker[5].astype(int) % 32]
                    #print("hago append")
                    monitorized_area_colours.append(color)

                    if self.ros:
                        world_features = monitors_functions.tracker_to_topic(
                            self, tracker, object_type,
                            color)  # world_features (w,l,h,x,y,z,id)
                        #print("WF: ", world_features)
                        if kitti:
                            num_image = detections_rosmsg.header.seq - 1  # Number of image in the dataset, e.g. 0000.txt -> 0
                            object_properties = object_observation_angle, object_rotation, object_score
                            monitors_functions.store_kitti(
                                num_image, path, object_type, world_features,
                                object_properties)

                    if (self.display):
                        my_thickness = -1
                        geometric_functions.compute_and_draw(
                            tracker, color, my_thickness, output_image)

                    label = 'ID %06d' % tracker[5].astype(int)
                    cv2.putText(
                        output_image, label,
                        (tracker[0].astype(int), tracker[1].astype(int) - 20),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255], 2)
                    cv2.putText(
                        output_image, object_type,
                        (tracker[0].astype(int), tracker[1].astype(int) - 40),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255], 2)

                    # Evaluate if there is some obstacle in lane and calculate nearest distance

                    if self.filter_hdmap:
                        if tracker[-1]:  # In route, last element of the array
                            trackers_in_route += 1
                            obstacle_local_position = np.zeros((1, 9))

                            obstacle_local_position[0, 7] = world_features[3]
                            obstacle_local_position[0, 8] = world_features[4]

                            obstacle_global_position = sort_functions.store_global_coordinates(
                                self.tf_map2lidar, obstacle_local_position)

                            #distance_to_object = monitors_functions.calculate_distance_to_nearest_object_inside_route(monitorized_lanes_rosmsg,obstacle_global_position)

                            detection = Node()

                            detection.x = obstacle_global_position[0, 0]
                            detection.y = -obstacle_global_position[1, 0]

                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            distance_to_object = math.sqrt(
                                pow(ego_x_global - detection.x, 2) +
                                pow(ego_y_global - detection.y, 2))
                            # TODO: remove this offset; it should be the distance
                            # from the object centroid to the front bumper, not
                            # LiDAR to LiDAR, hence this 5 m offset.
                            distance_to_object -= 5

                            #print("Distance to object: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                id_nearest = tracker[5]
                                self.nearest_object_in_route = distance_to_object
                    else:
                        # Evaluate in the geometric monitorized area

                        x = world_features[3]
                        y = world_features[4]

                        print("main")
                        print("goemetric area: ",
                              self.geometric_monitorized_area)
                        print("x y: ", x, y)

                        if (x < self.geometric_monitorized_area[0]
                                and x > self.geometric_monitorized_area[1]
                                and y < self.geometric_monitorized_area[2]
                                and y > self.geometric_monitorized_area[3]):
                            trackers_in_route += 1
                            self.cont = 0
                            print("\n\n\nDentro")
                            distance_to_object = math.sqrt(
                                pow(x, 2) + pow(y, 2))
                            print("Nearest: ", self.nearest_object_in_route)
                            print("distance: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                self.nearest_object_in_route = distance_to_object

                print("Collision: ", self.collision_flag.data)
                print("trackers in route: ", trackers_in_route)
                print("Distance nearest: ", self.nearest_object_in_route)
                if self.collision_flag.data and (
                        trackers_in_route == 0 or
                    (self.nearest_object_in_route > 12 and self.abs_vel < 1)):
                    print("suma A")
                    self.cont += 1
                else:
                    self.cont = 0

                nearest_distance.data = self.nearest_object_in_route

                if (self.trajectory_prediction):

                    collision_id_list = [[], []]

                    # Evaluate collision with dynamic trackers

                    for a in range(dynamic_trackers.shape[1]):
                        for j in range(self.n.shape[0]):
                            e = dynamic_trackers[j][a]
                            color = self.colours[e[5].astype(int) % 32]
                            my_thickness = 2
                            geometric_functions.compute_and_draw(
                                e, color, my_thickness, output_image)

                            if (self.ros):
                                object_type = "trajectory_prediction"
                                monitors_functions.tracker_to_topic(
                                    self, e, object_type, color, j)

                        # Predict possible collision (including the predicted bounding boxes)

                        collision_id, index_bb = monitors_functions.predict_collision(
                            self.ego_predicted, dynamic_trackers[:, a])

                        if collision_id != -1:
                            collision_id_list[0].append(collision_id)
                            collision_id_list[1].append(index_bb)

                    # Evaluate collision with static trackers

                    for b in static_trackers:
                        if b[-1]:  # In route, last element of the array
                            collision_id, index_bb = monitors_functions.predict_collision(
                                self.ego_predicted, b,
                                static=True)  # Predict possible collision
                            if (collision_id != -1):
                                collision_id_list[0].append(collision_id)
                                collision_id_list[1].append(index_bb)

                    #if self.nearest_object_in_route < self.ego_braking_distance:
                    #    self.collision_flag.data = True

                    # Collision

                    if not self.collision_flag.data:
                        if not collision_id_list[0]:  # Empty
                            #if id_nearest == -1:
                            collision_id_list[0].append(
                                -1
                            )  # The ego-vehicle will not collide with any object
                            collision_id_list[1].append(-1)
                            self.collision_flag.data = False
                        elif collision_id_list[0] and (
                                self.nearest_object_in_route <
                                self.ego_braking_distance
                                or self.nearest_object_in_route < 12):
                            #elif id_nearest != -1 and (self.nearest_object_in_route < self.ego_braking_distance or self.nearest_object_in_route < 12):
                            self.collision_flag.data = True
                            self.cont = 0

                    #print("Collision id list: ", collision_id_list)
                    if (len(collision_id_list) > 1):
                        message = 'Predicted collision with objects: ' + str(
                            collision_id_list[0])
                    else:
                        message = 'Predicted collision with object: ' + str(
                            collision_id_list[0])

                    cv2.putText(output_image, message, (30, 140),
                                cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255],
                                2)  # Predicted collision message
            else:
                print("\033[1;33m" + "No object to track" + '\033[0;m')
                monitors_functions.empty_trackers_list(self)

                if self.collision_flag.data:
                    print("suma B")
                    self.cont += 1
        else:
            print("\033[1;33m" + "No objects detected" + '\033[0;m')
            monitors_functions.empty_trackers_list(self)

            if self.collision_flag.data:
                print("suma C")
                self.cont += 1

        print("cont: ", self.cont)
        if self.cont >= 3:
            self.collision_flag.data = False
            self.nearest_object_in_route = 50000
            nearest_distance.data = float(self.nearest_object_in_route)

        self.end = time.time()

        fps = 1 / (self.end - self.start)

        self.avg_fps += fps
        self.frame_no += 1

        print("SORT time: {}s, fps: {}, avg fps: {}".format(
            round(self.end - self.start, 3), round(fps, 3),
            round(self.avg_fps / self.frame_no, 3)))

        message = 'Trackers: ' + str(len(trackers))
        cv2.putText(output_image, message, (30, 20), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)
        message = 'Dynamic trackers: ' + str(ndt)
        cv2.putText(output_image, message, (30, 50), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)
        try:
            message = 'Static trackers: ' + str(static_trackers.shape[0])
        except AttributeError:  # static_trackers may still be an empty list
            message = 'Static trackers: ' + str(0)
        cv2.putText(output_image, message, (30, 80), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)

        # Publish the list of tracked obstacles and predicted collision

        print("Data: ", nearest_distance.data)
        print("Collision: ", self.collision_flag.data)

        self.pub_bev_sort_tracking_markers_list.publish(
            self.trackers_marker_list)
        self.pub_collision.publish(self.collision_flag)
        self.pub_nearest_object_distance.publish(nearest_distance)

        print("moni: ", len(monitorized_area_colours))
        self.particular_monitorized_area_list = self.mot_tracker.get_particular_monitorized_area_list(
            detections_rosmsg.header.stamp, monitorized_area_colours)
        self.pub_particular_monitorized_area_markers_list.publish(
            self.particular_monitorized_area_list)

        if self.write_video:
            if not self.video_flag:
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                self.output = cv2.VideoWriter(
                    "map-filtered-mot.avi", fourcc, 20,
                    (self.image_width, self.image_height))
            self.output.write(output_image)

        # Add a grid to appreciate the obstacles coordinates

        if (self.grid):
            gap = int(
                round(self.view_ahead * (self.image_width / self.real_width)))
            geometric_functions.draw_basic_grid(gap, output_image, pxstep=50)

        if (self.display):
            cv2.imshow("SORT tracking", output_image)
            cv2.waitKey(1)

        self.detections_subscriber = Subscriber(self.detections_topic,
                                                BEV_detections_list)
        self.odom_subscriber = Subscriber(self.odom_topic,
                                          nav_msgs.msg.Odometry)
        self.monitorized_lanes_subscriber = Subscriber(
            self.monitorized_lanes_topic, MonitorizedLanes)

        ts = TimeSynchronizer([
            self.detections_subscriber, self.odom_subscriber,
            self.monitorized_lanes_subscriber
        ], header_synchro)
        ts.registerCallback(self.callback)
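
The callback unregisters all three subscribers on entry and rebuilds them, plus a fresh TimeSynchronizer, on exit, which throttles processing to one synchronized tuple at a time. A lighter alternative is to keep the subscriptions alive and drop tuples that arrive while busy (the _busy flag and process() worker are introduced here, not part of the original):

def callback(self, detections_rosmsg, odom_rosmsg, monitorized_lanes_rosmsg):
    if getattr(self, '_busy', False):
        return  # still working on the previous tuple
    self._busy = True
    try:
        self.process(detections_rosmsg, odom_rosmsg, monitorized_lanes_rosmsg)
    finally:
        self._busy = False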
Example #12
class LaserPublisher(object):
    def __init__(self):
        if not rospy.core.is_initialized():
            rospy.init_node('laser_test')
            rospy.loginfo("Initialised rospy node: laser_test")

        self.tl = TransformListener()
        self.lp = LaserProjection()

        # Publishers
        self.all_laser_pub = rospy.Publisher('/pepper/laser_2',
                                             LaserScan,
                                             queue_size=1)
        self.pc_pub = rospy.Publisher('/cloud', PointCloud2, queue_size=1)
        self.pcl_pub = rospy.Publisher('/cloudl', PointCloud2, queue_size=1)
        self.pcr_pub = rospy.Publisher('/cloudr', PointCloud2, queue_size=1)
        self.pc_redone_pub = rospy.Publisher('/cloud_redone',
                                             PointCloud2,
                                             queue_size=1)
        self.pc_rereprojected_pub = rospy.Publisher('/cloud_rereprojected',
                                                    PointCloud2,
                                                    queue_size=1)

        # Subscribers
        left_sub = Subscriber('/pepper/scan_left', LaserScan)
        front_sub = Subscriber('/pepper/scan_front', LaserScan)
        right_sub = Subscriber('/pepper/scan_right', LaserScan)

        self.ts = TimeSynchronizer([left_sub, front_sub, right_sub], 10)
        rospy.loginfo("Finished intialising")
        self.ddr = DDR('increment')
        default_increment = radians(120.0 * 2.0) / 61.0
        self.ddr.add_variable('angle_increment', '', default_increment, 0.05,
                              0.08)
        # 130.665
        self.ddr.add_variable('half_max_angle', '', 120., 115., 145.0)
        self.ddr.start(self.dyn_rec_callback)
        self.ts.registerCallback(self.scan_cb)
        rospy.loginfo("Ready to go.")

    def add_variables_to_self(self):
        var_names = self.ddr.get_variable_names()
        for var_name in var_names:
            self.__setattr__(var_name, None)

    def dyn_rec_callback(self, config, level):
        rospy.loginfo("Received reconf call: " + str(config))
        # Update all variables
        var_names = self.ddr.get_variable_names()
        for var_name in var_names:
            self.__dict__[var_name] = config[var_name]
        return config

    def scan_cb(self, left, front, right):
        rospy.loginfo("We got scan_cb")
        translated_points = []
        try:
            pc_left = self.lp.projectLaser(left, channel_options=0x00)
            pc_front = self.lp.projectLaser(front, channel_options=0x00)
            pc_right = self.lp.projectLaser(right, channel_options=0x00)
        except Exception as e:
            rospy.logerr("Failed to project laser scan because: " + str(e))
            return  # the projected clouds are undefined past this point

        pc_left.header.stamp = rospy.Time.now()
        pc_left.header.frame_id = 'SurroundingLeftLaser_frame'
        self.pcl_pub.publish(pc_left)
        self.pcr_pub.publish(pc_right)

        transform_right_to_front = self.tl.lookupTransform(
            'base_footprint', 'SurroundingRightLaser_frame', rospy.Time.now())
        rospy.logdebug("Transform Right to Front:")
        rospy.logdebug(transform_right_to_front)
        ts = TransformStamped()
        ts.transform.translation = Vector3(*transform_right_to_front[0])
        ts.transform.rotation = Quaternion(*transform_right_to_front[1])
        ts.header.stamp = rospy.Time.now()
        transformed_cloud = do_transform_cloud(pc_right, ts)
        # right point cloud translation
        for p in read_points(transformed_cloud,
                             field_names=('x', 'y', 'z'),
                             skip_nans=False):
            translated_points.append(p)

        for i in range(8):
            translated_points.append(
                (float('nan'), float('nan'), float('nan')))

        transform_front_to_front = self.tl.lookupTransform(
            'base_footprint', 'SurroundingFrontLaser_frame', rospy.Time.now())
        rospy.logdebug("Transform Front to Front:")
        rospy.logdebug(transform_front_to_front)
        ts = TransformStamped()
        ts.transform.translation = Vector3(*transform_front_to_front[0])
        ts.transform.rotation = Quaternion(*transform_front_to_front[1])
        ts.header.stamp = rospy.Time.now()
        transformed_cloud_f = do_transform_cloud(pc_front, ts)

        # front point cloud
        for p in read_points(transformed_cloud_f,
                             field_names=('x', 'y', 'z'),
                             skip_nans=False):
            translated_points.append(p)

        transform_left_to_front = self.tl.lookupTransform(
            'base_footprint', 'SurroundingLeftLaser_frame', rospy.Time.now())
        rospy.logdebug("Transform Left to Front:")
        rospy.logdebug(transform_left_to_front)
        ts = TransformStamped()
        ts.transform.translation = Vector3(*transform_left_to_front[0])
        ts.transform.rotation = Quaternion(*transform_left_to_front[1])
        ts.header.stamp = rospy.Time.now()
        from copy import deepcopy
        transformed_cloud_l = do_transform_cloud(deepcopy(pc_left), ts)

        for i in range(8):
            translated_points.append(
                (float('nan'), float('nan'), float('nan')))

        # left pc translation
        for p in read_points(transformed_cloud_l,
                             field_names=('x', 'y', 'z'),
                             skip_nans=False):
            translated_points.append(p)

        # Create a point cloud from the combined points wrt the front
        # laser frame
        pc_front.header.frame_id = 'base_footprint'
        point_cloud = create_cloud_xyz32(pc_front.header, translated_points)
        self.pc_pub.publish(point_cloud)
        rospy.logdebug("pointcloud all together len: " +
                       str(point_cloud.width))

        # # double check we have the same thing
        # compare_str = "\n"
        # for idx, (tp, pcp) in enumerate(zip(translated_points, read_points(point_cloud))):
        #     compare_str += str(idx).zfill(2) + ":\n"
        #     compare_str += "  tp : " + str(tp)
        #     compare_str += "\n  pcp: " + str(pcp) + "\n"
        # rospy.loginfo(compare_str)
        # # OK we know they are the same
        # # translated_points and point_cloud contain virtually the same data

        # Convert combined point cloud into LaserScan
        all_laser_msg = LaserScan()
        laser_ranges, angle_min, angle_max, angle_increment = self.pc_to_laser(
            point_cloud)
        all_laser_msg.header.frame_id = 'base_footprint'
        all_laser_msg.header.stamp = rospy.Time.now()
        all_laser_msg.ranges = laser_ranges
        all_laser_msg.angle_min = angle_min
        all_laser_msg.angle_max = angle_max
        all_laser_msg.angle_increment = angle_increment
        all_laser_msg.range_min = 0.1
        all_laser_msg.range_max = 7.0
        all_laser_msg.intensities = []
        self.all_laser_pub.publish(all_laser_msg)

        rospy.logdebug("all_laser_msg len: " + str(len(all_laser_msg.ranges)))
        pc_redone = self.lp.projectLaser(all_laser_msg, channel_options=0x00)
        rospy.logdebug("all_laser pc_redone len: " + str(pc_redone.width))
        self.pc_redone_pub.publish(pc_redone)

        # compare what came in and what came out
        rospy.logdebug("point_cloud frame_id, pc_redone frame_id:")
        rospy.logdebug(
            (point_cloud.header.frame_id, pc_redone.header.frame_id))
        rospy.logdebug("point_cloud is correct, pc_redone is incorrect")
        compare_str = "\n"
        for idx, (point_in, point_out) in enumerate(
                zip(read_points(point_cloud), read_points(pc_redone))):
            point_out = [point_out[0], point_out[1], 0.0]
            point_in = [point_in[0], point_in[1], 0.0]
            compare_str += str(idx).zfill(2) + ":\n"
            compare_str += "  in : " + str(point_in)
            compare_str += "\n  out: " + str(point_out) + "\n"
            dist = np.linalg.norm(np.array(point_out) - np.array(point_in))
            compare_str += " dist: " + str(dist) + "\n"
            # angle
            angle1 = atan2(point_in[1], point_in[0])
            angle2 = atan2(point_out[1], point_out[0])
            angle_dif = angle2 - angle1
            compare_str += " angle dif: " + str(angle_dif) + "\n"

        rospy.logdebug(compare_str)
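        # Note: the lookupTransform calls above ask for rospy.Time.now(),
        # which is usually newer than the latest TF data available and can
        # raise an ExtrapolationException. A common pattern (an assumption,
        # not from this code) is to request the latest transform instead:
        #   self.tl.lookupTransform('base_footprint',
        #                           'SurroundingRightLaser_frame',
        #                           rospy.Time(0))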

    def pc_to_laser(self, cloud):
        laser_points = []
        points_rereprojected = []
        multiply_num_rays = 8
        num_rays = 61 * multiply_num_rays
        laser_points2 = [float('nan')] * num_rays
        min_angle = -radians(self.half_max_angle)
        max_angle = radians(self.half_max_angle)
        # angle_increment = self.angle_increment
        angle_increment = (radians(self.half_max_angle) *
                           2.0) / float(num_rays)
        big_str = "\n"
        for idx, p in enumerate(read_points(cloud, skip_nans=False)):
            #dist = self.get_dist(p[0], p[1])
            p = [p[0], p[1], 0.0]
            dist = np.linalg.norm(np.array((0., 0., 0.)) - np.array(p))
            # dist1 = self.get_dist(p[0], p[1])
            big_str += "   " + str(idx).zfill(2) + ": x: " + str(round(
                p[0], 2)) + ", y: " + str(round(p[1], 2)) + ", z: " + str(
                    round(p[2], 2)) + " = " + str(round(
                        dist, 2)) + "m (at " + str(
                            round(degrees(idx * angle_increment + min_angle),
                                  2)) + "deg)\n"

            laser_points.append(dist)
            # coords from dist
            x = dist * cos(idx * angle_increment + min_angle)
            y = dist * sin(idx * angle_increment + min_angle)
            print(" [ px, py, are the correct points ] ")
            print("dist, px, py: " + str(dist) + " " + str(p[0])) + " " + str(
                p[1])
            print("dist, x, y:   " + str(dist) + " " + str(x) + " " + str(y))

            dist_from_rereproj = self.get_dist(x, y)
            print("dist rereproj: " + str(dist_from_rereproj))
            # print("dist1       : " + str(dist1))

            # what if a make a pointcloud based in the cos sin version
            points_rereprojected.append((x, y, 0.0))

            # angle from point
            angle = atan2(p[1], p[0])
            # angle2 = atan2(y, x)
            expected_angle = idx * self.angle_increment + min_angle
            if not isnan(angle):
                tmp_angle = angle - min_angle
                print("tmp_angle: " + str(degrees(tmp_angle))) + " deg"
                print("angle_increment: " + str(degrees(angle_increment)))
                closest_index = int(tmp_angle / angle_increment)
                print("closest index: " + str(closest_index))
                if closest_index >= len(laser_points2):
                    laser_points2[-1] = dist
                elif closest_index < 0:
                    laser_points2[0] = dist
                else:
                    laser_points2[closest_index] = dist
            else:
                print("nan, not adding anything to scan")

            # laser_points[]
            print("Angle from p : " + str(round(degrees(angle), 2)))
            # print("Angle from xy: " + str(round(degrees(angle2), 2)))
            print("Expected angle: " + str(round(degrees(expected_angle), 2)))

        rospy.logdebug("Lasered cloud")
        rospy.logdebug(big_str)

        laser_points = laser_points2
        print("Len of laser points after new technique: " +
              str(len(laser_points)))

        rereprojected_pc = PointCloud2()
        rereprojected_pc.header.frame_id = 'base_footprint'
        rereprojected_pc.header.stamp = rospy.Time.now()
        point_cloud_rere = create_cloud_xyz32(rereprojected_pc.header,
                                              points_rereprojected)
        self.pc_rereprojected_pub.publish(point_cloud_rere)

        return laser_points, min_angle, max_angle, angle_increment

    def get_dist(self, x0, y0, x1=0.0, y1=0.0):
        return sqrt((x1 - x0)**2 + (y1 - y0)**2)
Пример #13
0
class PCLChecker(object):
    def __init__(self):
        rospy.init_node('pcl_checker', anonymous=True, disable_signals=True)
        # Original laser scan
        lleft_sub = Subscriber('/pepper/scan_left', LaserScan)
        lfront_sub = Subscriber('/pepper/scan_front', LaserScan)
        lright_sub = Subscriber('/pepper/scan_right', LaserScan)

        # Original point cloud
        pcleft_sub = Subscriber('/pepper/pc_left', PointCloud2)
        pcfront_sub = Subscriber('/pepper/pc_front', PointCloud2)
        pcright_sub = Subscriber('/pepper/pc_right', PointCloud2)

        # Translated point cloud
        pcall_sub = Subscriber('/cloud_in', PointCloud2)

        self.ts = TimeSynchronizer([
            lleft_sub, lfront_sub, lright_sub, pcleft_sub, pcfront_sub,
            pcright_sub, pcall_sub
        ], 10)

        self.ts.registerCallback(self.check_values)

    def get_dist(self, x0, y0, x1=0, y1=0):
        return sqrt((x1 - x0)**2 + (y1 - y0)**2)

    def check_values(self, olleft, olfront, olright, opc_right, opc_left,
                     opc_front, allpc):
        # Front
        olfront_angles = []
        opc_dist = []
        opc_angles = []

        # Left
        olleft_angles = []
        opcleft_dist = []
        opcleft_angles = []

        # Right
        olright_angles = []
        opcright_dist = []
        opcright_angles = []

        # Combined
        alllaser_dist = []
        alllaser_angles = []
        allpc_dist = []
        allpc_angles = []

        # Right
        for i in range(len(olright.ranges)):
            alllaser_dist.append(olright.ranges[i])
            alllaser_angles.append(olright.angle_min +
                                   i * olright.angle_increment - 1.757)
            olright_angles.append(olright.angle_min +
                                  i * olright.angle_increment - 1.757)

        for p in read_points(opc_right, skip_nans=True):
            distance = self.get_dist(p[0], p[1])
            opcright_dist.append(distance)
            opcright_angles.append(atan2(p[1], p[0]) - 1.757)

        # Front
        for i in range(len(olfront.ranges)):
            alllaser_dist.append(olfront.ranges[i])
            alllaser_angles.append(olfront.angle_min +
                                   i * olfront.angle_increment)
            olfront_angles.append(olfront.angle_min +
                                  i * olfront.angle_increment)

        for p in read_points(opc_front, skip_nans=True):
            opc_dist.append(self.get_dist(p[0], p[1]))
            opc_angles.append(atan2(p[1], p[0]))

        # Left
        for i in range(len(olleft.ranges)):
            alllaser_dist.append(olleft.ranges[i])
            alllaser_angles.append(olleft.angle_min +
                                   i * olleft.angle_increment + 1.757)
            olleft_angles.append(olleft.angle_min +
                                 i * olleft.angle_increment + 1.757)

        for p in read_points(opc_left, skip_nans=True):
            distance = self.get_dist(p[0], p[1])
            opcleft_dist.append(distance)
            opcleft_angles.append(atan2(p[1], p[0]) + 1.757)

        # All combined
        for p in read_points(allpc, skip_nans=True):
            allpc_dist.append(self.get_dist(p[0], p[1]))
            allpc_angles.append(atan2(p[1], p[0]))

        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("original right laser")
        rospy.loginfo(olright.ranges)
        rospy.loginfo(olright_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("original front laser")
        rospy.loginfo(olfront.ranges)
        rospy.loginfo(olfront_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("original left laser")
        rospy.loginfo(olleft.ranges)
        rospy.loginfo(olleft_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("all combined laser")
        rospy.loginfo(alllaser_dist)
        rospy.loginfo(alllaser_angles)
        rospy.loginfo(
            "==============================================================")
        rospy.loginfo("original right point cloud")
        rospy.loginfo(opcright_dist)
        rospy.loginfo(opcright_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("original front point cloud")
        rospy.loginfo(opc_dist)
        rospy.loginfo(opc_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("original left point cloud")
        rospy.loginfo(opcleft_dist)
        rospy.loginfo(opcleft_angles)
        rospy.loginfo(
            "===============================================================")
        rospy.loginfo("all combined point cloud")
        rospy.loginfo(allpc_dist)
        rospy.loginfo(allpc_angles)
        rospy.loginfo(
            "===============================================================")
Пример #14
0
class ClassifierNode:
    def __init__(self):
        self.cv_bridge = CvBridge()

        self.path_sign_model = rospy.get_param('~traffic_sign_path')
        self.path_light_model = rospy.get_param('~traffic_light_path')

        self.cuda_devices = {
            0: 'cuda:0',
            1: 'cuda:1',
            2: 'cuda:2',
            3: 'cuda:3'
        }

        gpu_id = rospy.get_param('~gpu_id')
        self.device = torch.device(self.cuda_devices[gpu_id])

        self.num_traffic_light = rospy.get_param('~num_traffic_light_class')
        self.model_traffic_light = STN(self.num_traffic_light).to(self.device)
        state_dict_light = torch.load(self.path_light_model)
        self.model_traffic_light.load_state_dict(state_dict_light)
        self.model_traffic_light.eval().to(self.device)
        self.model_traffic_light.cuda().to(self.device)

        self.num_traffic_sign = rospy.get_param('~num_traffic_sign_class')
        self.model_traffic_sign = STN(self.num_traffic_sign).to(self.device)
        state_dict_sign = torch.load(self.path_sign_model)
        self.model_traffic_sign.load_state_dict(state_dict_sign)
        self.model_traffic_sign.eval().to(self.device)
        self.model_traffic_sign.cuda().to(self.device)

        self.pub_fm01_new_bbox = rospy.Publisher(
            node_handle_name + '/cam_fm_01/detection_2d_array/',
            Detection2DArray,
            queue_size=1)
        self.pub_fl01_new_bbox = rospy.Publisher(
            node_handle_name + '/cam_fl_01/detection_2d_array/',
            Detection2DArray,
            queue_size=1)
        self.pub_fr01_new_bbox = rospy.Publisher(
            node_handle_name + '/cam_fr_01/detection_2d_array/',
            Detection2DArray,
            queue_size=1)

        self.topic_fm01_img = rospy.get_param('~topic_name_fm01_img')
        self.topic_fm01_bbox = rospy.get_param('~topic_name_fm01_bbox')

        self.sub_image_fm01 = Subscriber(self.topic_fm01_img, Image)
        self.sub_bbox_fm01 = Subscriber(self.topic_fm01_bbox, Detection2DArray)
        self.time_sync_fm01 = TimeSynchronizer(
            [self.sub_image_fm01, self.sub_bbox_fm01], 10)
        self.time_sync_fm01.registerCallback(self.callback_synchronize,
                                             "cam_fm_01")

        self.topic_fl01_img = rospy.get_param('~topic_name_fl01_img')
        self.topic_fl01_bbox = rospy.get_param('~topic_name_fl01_bbox')
        self.sub_image_fl01 = Subscriber(self.topic_fl01_img, Image)
        self.sub_bbox_fl01 = Subscriber(self.topic_fl01_bbox, Detection2DArray)
        self.time_sync_fl01 = TimeSynchronizer(
            [self.sub_image_fl01, self.sub_bbox_fl01], 10)
        self.time_sync_fl01.registerCallback(self.callback_synchronize,
                                             "cam_fl_01")

        self.topic_fr01_img = rospy.get_param('~topic_name_fr01_img')
        self.topic_fr01_bbox = rospy.get_param('~topic_name_fr01_bbox')
        self.sub_image_fr01 = Subscriber(self.topic_fr01_img, Image)
        self.sub_bbox_fr01 = Subscriber(self.topic_fr01_bbox, Detection2DArray)
        self.time_sync_fr01 = TimeSynchronizer(
            [self.sub_image_fr01, self.sub_bbox_fr01], 10)
        self.time_sync_fr01.registerCallback(self.callback_synchronize,
                                             "cam_fr_01")

    def callback_synchronize(self, msg_img, msg_bbox, cam_id):

        cv_img = self.cv_bridge.imgmsg_to_cv2(msg_img)
        new_bbox = Detection2DArray()

        print("aaaaa")

        for detection in msg_bbox.detections:

            box = Detection2D()
            box.bbox = detection.bbox
            pose = ObjectHypothesisWithPose()

            print(detection.results[0].id)

            if detection.results[0].id == 7:  # id:7 Traffic light

                x_min = int(detection.bbox.center.x -
                            (detection.bbox.size_x / 2))
                y_min = int(detection.bbox.center.y -
                            (detection.bbox.size_y / 2))
                x_max = int(detection.bbox.center.x +
                            (detection.bbox.size_x / 2))
                y_max = int(detection.bbox.center.y +
                            (detection.bbox.size_y / 2))

                img_cropped = cv_img[y_min:y_max, x_min:x_max]

                preprocessed_img = self.preprocess_img(img=img_cropped)
                predict_f = self.model_traffic_light(preprocessed_img)
                predict = predict_f.data.max(1, keepdim=True)[1]
                pose.id = int(1000 + int(predict))
                cnn_prob = float(torch.exp(predict_f)[0][int(predict)])
                pose.score = cnn_prob
                box.results.append(pose)

                # pose.id = int(1000 + int(predict))
                # box.results.append(pose)
                if self.validate_light_with_brightness_region(
                        img_cropped, int(predict)):
                    new_bbox.detections.append(box)

            elif detection.results[0].id == 8:  # id:8 Traffic sign

                x_min = int(detection.bbox.center.x -
                            (detection.bbox.size_x / 2))
                y_min = int(detection.bbox.center.y -
                            (detection.bbox.size_y / 2))
                x_max = int(detection.bbox.center.x +
                            (detection.bbox.size_x / 2))
                y_max = int(detection.bbox.center.y +
                            (detection.bbox.size_y / 2))

                img_cropped = cv_img[y_min:y_max, x_min:x_max]

                preprocessed_img = self.preprocess_img(img=img_cropped)
                predict_f = self.model_traffic_sign(preprocessed_img)
                predict = predict_f.data.max(1, keepdim=True)[1]
                cnn_prob = float(torch.exp(predict_f)[0][int(predict)])
                pose.score = cnn_prob
                pose.id = int(2000 + int(predict))
                box.results.append(pose)
                new_bbox.detections.append(box)

            else:
                box = detection
                new_bbox.detections.append(box)

        new_bbox.header = msg_bbox.header
        if cam_id == "cam_fm_01":
            self.pub_fm01_new_bbox.publish(new_bbox)
        elif cam_id == "cam_fr_01":
            self.pub_fr01_new_bbox.publish(new_bbox)
        elif cam_id == "cam_fl_01":
            self.pub_fl01_new_bbox.publish(new_bbox)

    def preprocess_img(self, img):

        loader = transforms.Compose([
            transforms.Resize((32, 32)),
            transforms.ToTensor(),
            transforms.Normalize((0.3337, 0.3064, 0.3171),
                                 (0.2672, 0.2564, 0.2629))
        ])

        img = PilImage.fromarray(img)
        img = loader(img).float()
        img = Variable(img)
        img = img.unsqueeze(0)

        return img.cuda(device=self.device)

    def validate_light_with_brightness_region(self, rgb_image, light_id):

        if light_id == 0:
            return True

        hsv = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2HSV)

        v = hsv[:, :, 2]
        height, width = v.shape

        height_div3 = int((height - 6) / 3)

        red_region = v[3:3 + height_div3, 3:width - 3]
        yellow_region = v[3 + height_div3:3 + height_div3 * 2, 3:width - 3]
        green_region = v[3 + height_div3 * 2:3 + height_div3 * 3, 3:width - 3]

        sum_brightness_red = np.sum(red_region)
        avg_brightness_red = sum_brightness_red / (height_div3 * width)
        sum_brightness_yellow = np.sum(yellow_region)
        avg_brightness_yellow = sum_brightness_yellow / (height_div3 * width)
        sum_brightness_green = np.sum(green_region)
        avg_brightness_green = sum_brightness_green / (height_div3 * width)

        # print(sum_brightness_red)
        # print(sum_brightness_yellow)
        # print(sum_brightness_green)

        if avg_brightness_red >= avg_brightness_yellow and avg_brightness_red >= avg_brightness_green:
            brightness_predict = 1
        elif avg_brightness_yellow >= avg_brightness_green:
            brightness_predict = 2
        else:
            brightness_predict = 3

        if brightness_predict == light_id:
            return True
        else:
            return False
Пример #15
0
                                   Image,
                                   queue_size=100)
    vis_render_0 = rospy.Publisher(CONFIG["vis_topic_0"],
                                   Image,
                                   queue_size=100)

    kps_pub_1 = rospy.Publisher(CONFIG["kps_topic_1"],
                                KeypointsList,
                                queue_size=100)
    hm_render_1 = rospy.Publisher(CONFIG["hm_topic_1"], Image, queue_size=100)
    img_render_1 = rospy.Publisher(CONFIG["hm_img_topic_1"],
                                   Image,
                                   queue_size=100)
    vis_render_1 = rospy.Publisher(CONFIG["vis_topic_1"],
                                   Image,
                                   queue_size=100)

    # define the sync subscriber(google `message filter` for more info)
    tss_0 = TimeSynchronizer([
        Subscriber(CONFIG["image_src_0"], Image),
        Subscriber(CONFIG["bbxes_src_0"], BoundingBoxes)
    ], 200)
    tss_0.registerCallback(callback_0)

    tss_1 = TimeSynchronizer([
        Subscriber(CONFIG["image_src_1"], Image),
        Subscriber(CONFIG["bbxes_src_1"], BoundingBoxes)
    ], 200)
    tss_1.registerCallback(callback_1)
    rospy.spin()
Пример #16
0
        exit(0)


if __name__ == "__main__":
    parser = init_argparse()
    args, unknown = parser.parse_known_args()

    rospy.init_node('object_finder')
    pose_pub = rospy.Publisher(POS_TOPIC, PoseStamped, queue_size=1)
    line_pub = rospy.Publisher(MARKER_TOPIC, Marker, queue_size=1)

    tss = TimeSynchronizer([
        Subscriber(IMAGE_TOPIC, Image),
        Subscriber(CAMERA_INFO_TOPIC, CameraInfo)
    ], 10)
    tss.registerCallback(image_callback)

    bridge = CvBridge()

    print('----------------------------------------------')
    print('OpenCV object detector')
    print('Controls:')
    print('    - S     Save current config')
    print('    - ESC   Exit without saving current config')
    print('----------------------------------------------')

    config = Settings.load(args.config, default=make_default_config())
    if config == None:
        exit(1)

    if args.gui == 'full':
Пример #17
0
                                KeypointsList,
                                queue_size=100)
    kps_pub_1 = rospy.Publisher(CONFIG["kps_data_1"],
                                KeypointsList,
                                queue_size=100)

    rospy.init_node('cropper', anonymous=True)
    crop_length = CONFIG["crop_length"]

    tss_0 = TimeSynchronizer([
        Subscriber(CONFIG["img_topic_0_remap"], Image),
        Subscriber(CONFIG["img_topic_1_remap"], Image),
        Subscriber(CONFIG["kps_data_0_remap"], KeypointsList),
        Subscriber(CONFIG["kps_data_1_remap"], KeypointsList)
    ], 200)
    tss_0.registerCallback(callback)
    rospy.spin()

    # rate = rospy.Rate(CONFIG["hz"])

    # # I don't know why we have to set image scale fixed
    # # Or else it seems like the yolo will fail at recognize varied scales imgs(you can try comment cv2.resize(..))
    # pre_width=CONFIG["pre_width"]
    # pre_height=CONFIG["pre_height"]

    # # fetch the image name lists
    # name_list_0=[]
    # name_list_1=[]
    # if CONFIG["list_path_0"]!="":
    #     with open(CONFIG["list_path_0"],"r") as f_0:
    #         name_list_0 = f_0.readlines()
Пример #18
0
image6_sub = Subscriber('/camera6/color/image_raw_throttle_sync', Image)

aligned_depth1_sub = Subscriber('/camera1/aligned_depth_to_color/image_raw',
                                Image)
aligned_depth2_sub = Subscriber('/camera2/aligned_depth_to_color/image_raw',
                                Image)
aligned_depth3_sub = Subscriber('/camera3/aligned_depth_to_color/image_raw',
                                Image)
aligned_depth4_sub = Subscriber('/camera4/aligned_depth_to_color/image_raw',
                                Image)
aligned_depth5_sub = Subscriber('/camera5/aligned_depth_to_color/image_raw',
                                Image)
aligned_depth6_sub = Subscriber('/camera6/aligned_depth_to_color/image_raw',
                                Image)

ts1 = TimeSynchronizer([image1_sub, aligned_depth1_sub], 10)
ts2 = TimeSynchronizer([image2_sub, aligned_depth2_sub], 10)
ts3 = TimeSynchronizer([image3_sub, aligned_depth3_sub], 10)
ts4 = TimeSynchronizer([image4_sub, aligned_depth4_sub], 10)
ts5 = TimeSynchronizer([image5_sub, aligned_depth5_sub], 10)
ts6 = TimeSynchronizer([image6_sub, aligned_depth6_sub], 10)

ts1.registerCallback(pubLishImages1)
ts2.registerCallback(pubLishImages2)
ts3.registerCallback(pubLishImages3)
ts4.registerCallback(pubLishImages4)
ts5.registerCallback(pubLishImages5)
ts6.registerCallback(pubLishImages6)

rospy.spin()