Example #1
def inside_polygon(p, polygon):
    """
    This function checks whether a point lies inside a given lane (or area), i.e. a polygon,
    using the ray-casting algorithm.
    (https://jsbsan.blogspot.com/2011/01/saber-si-un-punto-esta-dentro-o-fuera.html)

    Takes a point and a polygon and returns True if the point is inside and False otherwise.
    """
    counter = 0
    xinters = 0

    p1 = polygon[0]  # polygon is a list of nodes exposing .x and .y coordinates

    for i in range(1,len(polygon)+1):
        p2 = polygon[i%len(polygon)]

        if (p.y > min(p1.y,p2.y)):
            if (p.y <= max(p1.y,p2.y)):
                if (p.x <= max(p1.x,p2.x)):
                    if (p1.y != p2.y):
                        xinters = (p.y-p1.y)*(p2.x-p1.x)/(p2.y-p1.y)+p1.x
                        if (p1.x == p2.x or p.x <= xinters):
                            counter += 1
        p1 = p2

    if (counter % 2 == 0):
        return False
    else:
        return True
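# Illustrative usage only: a minimal sketch, not part of the original snippet.
# _Point is a hypothetical stand-in for the Node class used above; any object
# exposing .x and .y attributes works with inside_polygon.
class _Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

_square = [_Point(0, 0), _Point(1, 0), _Point(1, 1), _Point(0, 1)]
print(inside_polygon(_Point(0.5, 0.5), _square))  # True: point inside the unit square
print(inside_polygon(_Point(2.0, 0.5), _square))  # False: point outside the unit square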
def calculate_3d_corners(bbox):
    """
    Get 3D bounding box corners

    bbox[0,1,2] -> LiDAR centroid
    bbox[3,4,5] -> l,w,h
    bbox[6]     -> yaw
    bbox[7,8]   -> vx,vy (global)

    Corner numbering: 0-3 are the lower corners, 4-7 the upper corners, and 8 marks the centroid (bbox[0,1,2]);
    only the eight corners are returned by this function.
        6 -------- 7
       /|         /|
      4 -------- 5 .
      | |    8   | |
      . 2 -------- 3
      |/         |/
      0 -------- 1
    """

    x, y, z = bbox[0:3]
    l, w, h = bbox[3:6]
    yaw = bbox[6]

    x_corners = [-l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2]
    y_corners = [w / 2, -w / 2, w / 2, -w / 2, w / 2, -w / 2, w / 2, -w / 2]
    z_corners = [-h / 2, -h / 2, -h / 2, -h / 2, h / 2, h / 2, h / 2, h / 2]

    if yaw > np.pi:
        yaw -= np.pi
    R = rotz(yaw)

    corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
    corners_3d = corners_3d + np.vstack([x, y, z])

    corners_3d_list = []

    for i in range(corners_3d.shape[1]):
        node = Node()
        node.x = corners_3d[0, i]
        node.y = corners_3d[1, i]
        node.z = corners_3d[2, i]
        corners_3d_list.append(node)

    return corners_3d_list
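# Illustrative usage only: a minimal sketch, not part of the original snippet.
# It assumes numpy (np), rotz() and Node() are available in this module, since
# calculate_3d_corners relies on them. The bbox layout follows the docstring above.
_bbox = np.array([10.0, 5.0, 0.0,   # x, y, z LiDAR centroid
                  4.5, 2.0, 1.6,    # l, w, h
                  np.pi / 4.0,      # yaw
                  0.0, 0.0])        # vx, vy (unused here)

_corners = calculate_3d_corners(_bbox)
for _idx, _c in enumerate(_corners):
    print(_idx, round(_c.x, 2), round(_c.y, 2), round(_c.z, 2))  # Corners 0-3 lower, 4-7 upper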
Example #3
def find_closest_segment(way, point):
    """
    This function obtains the closest segment (two consecutive nodes) of a given way (left or right).

    Returns the indices of the way that bound this segment (widened by one node on each side when possible)
    and the minimum distance, or -1, -1, -1 if the way has fewer than two nodes.
    """

    min_distance = 999999

    closest_node_0 = Node()
    closest_node_1 = Node()

    closest = -1

    for i in range(len(way)-1):
        node_0 = Node()
        node_0.x = way[i].x 
        node_0.y = way[i].y

        node_1 = Node()
        node_1.x = way[i+1].x 
        node_1.y = way[i+1].y

        distance, _ =  geometric_functions.pnt2line(point, node_0, node_1)

        if (distance < min_distance):
            min_distance = distance
            closest = i
            closest_node_0 = node_0
            closest_node_1 = node_1

    if min_distance != 999999:   
        if (closest > 0) and (closest < len(way)-2):
            return closest-1,closest+2,min_distance
        elif (closest > 0) and (closest == len(way)-2):
            return closest-1,closest+1,min_distance
        elif closest == 0 and (len(way) == 2): 
            return 0,1,min_distance
        elif closest == 0 and (len(way) > 2): 
            return 0,2,min_distance
    else:
        return -1, -1, -1
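# Illustrative usage only: a minimal sketch, not part of the original snippet.
# It assumes Node() and geometric_functions.pnt2line are available as used above.
# The way is a polyline of nodes; the function returns the indices bounding the
# closest segment (widened by one node on each side when possible) and the distance.
_way = []
for _wx, _wy in [(0.0, 0.0), (10.0, 0.0), (20.0, 0.0), (30.0, 0.0)]:
    _n = Node()
    _n.x, _n.y = _wx, _wy
    _way.append(_n)

_query = Node()
_query.x, _query.y = 12.0, 3.0

_start_idx, _end_idx, _distance = find_closest_segment(_way, _query)
print(_start_idx, _end_idx, _distance)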
Example #4
def calculate_aux_point(point,m,delta):
    """
    Computes an auxiliary point displaced from point by delta along the x axis, with y following
    the line of slope m. m == 9999 is a sentinel for a vertical line, in which case the point is
    displaced by delta along the y axis instead.
    """
    aux_point = Node()

    if (m == 9999):
        aux_point.x = point.x
        aux_point.y = point.y + delta
    else:
        aux_point.x = point.x + delta
        aux_point.y = m*delta + point.y
    return aux_point
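# Illustrative usage only: a minimal sketch, not part of the original snippet,
# assuming Node() is available as above. It exercises both branches: a regular
# slope m and the m == 9999 sentinel used for vertical lines.
_origin = Node()
_origin.x, _origin.y = 2.0, 1.0

_p_sloped = calculate_aux_point(_origin, m=0.5, delta=2.0)     # -> (4.0, 2.0)
_p_vertical = calculate_aux_point(_origin, m=9999, delta=2.0)  # -> (2.0, 3.0)
print(_p_sloped.x, _p_sloped.y, _p_vertical.x, _p_vertical.y)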
Example #5
def calculate_index_last_waypoint(lane, predicted_distance):
    """
    """

    accumulated_distance = 0

    for i in range(len(lane.left.way) - 1):
        cn = Node()  # Current node
        cn.x = lane.left.way[i].x
        cn.y = -lane.left.way[i].y

        nn = Node()  # Next node
        nn.x = lane.left.way[i + 1].x
        nn.y = -lane.left.way[i + 1].y

        diff = math.sqrt(pow(cn.x - nn.x, 2) + pow(cn.y - nn.y, 2))
        accumulated_distance += diff

        if (accumulated_distance > predicted_distance):
            return i + 1
    return len(lane.left.way) - 1
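# Illustrative usage only: a minimal sketch, not part of the original snippet.
# _Way and _Lane are hypothetical stand-ins for the lane message used above; any
# object exposing lane.left.way as a list of nodes with .x and .y attributes works,
# and Node() is assumed to be available as in the rest of this module.
class _Way:
    def __init__(self, nodes):
        self.way = nodes

class _Lane:
    def __init__(self, nodes):
        self.left = _Way(nodes)

_nodes = []
for _wx in (0.0, 5.0, 10.0, 15.0):
    _n = Node()
    _n.x, _n.y = _wx, 0.0
    _nodes.append(_n)

_lane = _Lane(_nodes)
print(calculate_index_last_waypoint(_lane, predicted_distance=7.0))    # -> 2 (first waypoint past 7 m)
print(calculate_index_last_waypoint(_lane, predicted_distance=100.0))  # -> 3 (last index reached)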
Example #6
    def callback(self, detections_rosmsg, odom_rosmsg,
                 monitorized_lanes_rosmsg):
        """
        """

        try:
            # Get the latest available map -> LiDAR transform (rospy.Time(0))
            (translation, quaternion) = self.listener.lookupTransform(
                '/map', '/ego_vehicle/lidar/lidar1', rospy.Time(0))
            rot_matrix = tf.transformations.quaternion_matrix(quaternion)

            self.tf_map2lidar = rot_matrix
            self.tf_map2lidar[:3, 3] = self.tf_map2lidar[:3, 3] + translation  # This matrix transforms local to global coordinates

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            print("\033[1;33m" + "TF exception" + '\033[0;m')

        self.start = time.time()

        self.detections_subscriber.unregister()
        self.odom_subscriber.unregister()
        self.monitorized_lanes_subscriber.unregister()

        # Initialize the scene

        if not self.init_scene:
            self.real_height = detections_rosmsg.front - detections_rosmsg.back  # Grid height (m)
            self.real_front = detections_rosmsg.front + self.view_ahead  # To study obstacles (view_ahead) meters ahead
            self.real_width = -detections_rosmsg.left + detections_rosmsg.right  # Grid width (m)
            self.real_left = -detections_rosmsg.left

            r = float(self.image_height) / self.real_height

            self.image_width = int(round(self.real_width *
                                         r))  # Grid width (pixels)
            self.image_front = int(round(self.real_front * r))
            self.image_left = int(round(self.real_left * r))

            print("Real width: ", self.real_width)
            print("Real height: ", self.real_height)
            print("Image width: ", self.image_width)
            print("Image height: ", self.image_height)

            self.shapes = (self.real_front, self.real_left, self.image_front,
                           self.image_left)
            self.scale_factor = (self.image_height / self.real_height,
                                 self.image_width / self.real_width)

            # Our centroid is defined in BEV camera coordinates but centered in the ego-vehicle.
            # We need to translate it to the top-left corner (required by the SORT algorithm)

            self.tf_bevtl2bevcenter_m = np.array(
                [[1.0, 0.0, 0.0, self.shapes[1]],
                 [0.0, 1.0, 0.0, self.shapes[0]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # From BEV centered to BEV top-left (in pixels)

            self.tf_bevtl2bevcenter_px = np.array(
                [[1.0, 0.0, 0.0, -self.shapes[3]],
                 [0.0, 1.0, 0.0, -self.shapes[2]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # Rotation matrix from LiDAR frame to BEV frame (to convert to global coordinates)

            self.tf_lidar2bev = np.array([[0.0, -1.0, 0.0, 0.0],
                                          [-1.0, 0.0, 0.0, 0.0],
                                          [0.0, 0.0, -1.0, 0.0],
                                          [0.0, 0.0, 0.0, 1.0]])

            # Initialize the regression model to calculate the braking distance

            self.velocity_braking_distance_model = monitors_functions.fit_velocity_braking_distance_model(
            )

            # Tracker

            self.mot_tracker = sort_functions.Sort(max_age, min_hits, n,
                                                   self.shapes,
                                                   self.trajectory_prediction,
                                                   self.filter_hdmap)

            self.init_scene = True

        output_image = np.ones(
            (self.image_height, self.image_width, 3),
            dtype="uint8")  # Black image to represent the scene

        print("----------------")

        # Initialize ROS and monitors variables

        self.trackers_marker_list = visualization_msgs.msg.MarkerArray(
        )  # Initialize trackers list
        monitorized_area_colours = []

        trackers_in_route = 0
        nearest_distance = std_msgs.msg.Float64()
        nearest_distance.data = float(self.nearest_object_in_route)

        world_features = []
        trackers = []
        dynamic_trackers = []
        static_trackers = []
        ndt = 0

        timer_rosmsg = detections_rosmsg.header.stamp.to_sec()

        # Predict the ego-vehicle trajectory

        monitors_functions.ego_vehicle_prediction(self, odom_rosmsg,
                                                  output_image)

        print("Braking distance ego vehicle: ",
              float(self.ego_braking_distance))

        # Convert input data to bboxes to perform Multi-Object Tracking

        #print("\nNumer of total detections: ", len(detections_rosmsg.bev_detections_list))
        bboxes_features, types = sort_functions.bbox_to_xywh_cls_conf(
            self, detections_rosmsg, odom_rosmsg, detection_threshold,
            output_image)
        #print("Number of relevant detections: ", len(bboxes_features)) # score > detection_threshold

        # Emergency brake (detection only)
        """
        nearest_object_in_route = 99999

        if not self.collision_flag.data:
            for bbox,type_object in zip(bboxes_features,types):
                for lane in monitorized_lanes_rosmsg.lanes: 
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        detection = Node()

                        bbox = bbox.reshape(1,-1)
                        global_pos = sort_functions.store_global_coordinates(self.tf_map2lidar,bbox)
                        detection.x = global_pos[0,0]
                        detection.y = -global_pos[1,0] # N.B. In CARLA this coordinate is the opposite

                        is_inside, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(lane,detection,type_object)

                        if is_inside:
                            #distance_to_object = monitors_functions.calculate_distance_to_nearest_object_inside_route(monitorized_lanes_rosmsg,global_pos)
                            
                            
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            distance_to_object = math.sqrt(pow(ego_x_global-detection.x,2)+pow(ego_y_global-detection.y,2))
                            
                            
                            
                            
                            
                            
                            print("Distance to object: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                self.nearest_object_in_route = distance_to_object
                                nearest_distance.data = float(self.nearest_object_in_route)

                            print("Braking distance: ", self.ego_braking_distance)
                            
                            if distance_to_object < self.ego_braking_distance:
                                print("Break")
                                self.collision_flag.data = True
                                self.pub_collision.publish(self.collision_flag)
                                self.emergency_break_timer = time.time()
                                self.cont = 0
                                break
                else: 
                    continue 
                break
        else:
            dist = 99999
            for bbox,type_object in zip(bboxes_features,types):
                for lane in monitorized_lanes_rosmsg.lanes: 
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        detection = Node()

                        bbox = bbox.reshape(1,-1)
                        global_pos = sort_functions.store_global_coordinates(self.tf_map2lidar,bbox)
                        detection.x = global_pos[0,0]
                        detection.y = -global_pos[1,0] # N.B. In CARLA this coordinate is the opposite

                        is_inside, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(lane,detection,type_object)
                        print("inside: ", is_inside)
                        if is_inside:
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            aux = math.sqrt(pow(ego_x_global-detection.x,2)+pow(ego_y_global-detection.y,2))

                            if aux < dist:
                                dist = aux
                        break

            if dist > 5:
                self.cont += 1
            else:
                self.cont = 0
            print("distance to object after collision: ", dist)
            print("cont: ", self.cont)

        if self.cont >= 3:
            self.collision_flag.data = False
            self.nearest_object_in_route = 50000
            nearest_distance.data = float(self.nearest_object_in_route)

        print("Data: ", nearest_distance.data)
        print("Collision: ", self.collision_flag.data)

        self.pub_nearest_object_distance.publish(nearest_distance)
        self.pub_collision.publish(self.collision_flag)
        """
        ## Multi-Object Tracking

        # TODO: Publish on tracked_obstacle message instead of visualization marker
        # TODO: Evaluate the predicted position to predict its influence in a certain use case

        if (len(bboxes_features) > 0):  # At least one object was detected
            trackers, object_types, object_scores, object_observation_angles, dynamic_trackers, static_trackers = self.mot_tracker.update(
                bboxes_features, types, self.ego_vel_px, self.tf_map2lidar,
                self.shapes, self.scale_factor, monitorized_lanes_rosmsg,
                timer_rosmsg, self.angle_bb, self.geometric_monitorized_area)

            #print("Number of trackers: ", len(trackers))
            if len(dynamic_trackers.shape) == 3:
                #print("Dynamic trackers", dynamic_trackers.shape[1])
                ndt = dynamic_trackers.shape[1]
            else:
                #print("Dynamic trackers: ", 0)
                ndt = 0
            #print("Static trackers: ", static_trackers.shape[0])

            id_nearest = -1

            if (len(trackers) > 0):  # At least one object was tracked
                for i, tracker in enumerate(trackers):
                    object_type = object_types[i]
                    object_score = object_scores[i]
                    object_rotation = np.float64(object_observation_angles[i,
                                                                           0])
                    object_observation_angle = np.float64(
                        object_observation_angles[i, 1])

                    color = self.colours[tracker[5].astype(int) % 32]
                    #print("hago append")
                    monitorized_area_colours.append(color)

                    if self.ros:
                        world_features = monitors_functions.tracker_to_topic(
                            self, tracker, object_type,
                            color)  # world_features (w,l,h,x,y,z,id)
                        #print("WF: ", world_features)
                        if kitti:
                            num_image = detections_rosmsg.header.seq - 1  # Number of image in the dataset, e.g. 0000.txt -> 0
                            object_properties = object_observation_angle, object_rotation, object_score
                            monitors_functions.store_kitti(
                                num_image, path, object_type, world_features,
                                object_properties)

                    if (self.display):
                        my_thickness = -1
                        geometric_functions.compute_and_draw(
                            tracker, color, my_thickness, output_image)

                    label = 'ID %06d' % tracker[5].astype(int)
                    cv2.putText(
                        output_image, label,
                        (tracker[0].astype(int), tracker[1].astype(int) - 20),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255], 2)
                    cv2.putText(
                        output_image, object_type,
                        (tracker[0].astype(int), tracker[1].astype(int) - 40),
                        cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255], 2)

                    # Evaluate if there is some obstacle in lane and calculate nearest distance

                    if self.filter_hdmap:
                        if tracker[-1]:  # In route, last element of the array
                            trackers_in_route += 1
                            obstacle_local_position = np.zeros((1, 9))

                            obstacle_local_position[0, 7] = world_features[3]
                            obstacle_local_position[0, 8] = world_features[4]

                            obstacle_global_position = sort_functions.store_global_coordinates(
                                self.tf_map2lidar, obstacle_local_position)

                            #distance_to_object = monitors_functions.calculate_distance_to_nearest_object_inside_route(monitorized_lanes_rosmsg,obstacle_global_position)

                            detection = Node()

                            detection.x = obstacle_global_position[0, 0]
                            detection.y = -obstacle_global_position[1, 0]

                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y

                            distance_to_object = math.sqrt(
                                pow(ego_x_global - detection.x, 2) +
                                pow(ego_y_global - detection.y, 2))
                            distance_to_object -= 5  # TODO: remove this offset; it should be the distance from the object
                            # centroid to the vehicle's front bumper instead of LiDAR to LiDAR, hence this offset

                            #print("Distance to object: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                id_nearest = tracker[5]
                                self.nearest_object_in_route = distance_to_object
                    else:
                        # Evaluate in the geometric monitorized area

                        x = world_features[3]
                        y = world_features[4]

                        print("main")
                        print("goemetric area: ",
                              self.geometric_monitorized_area)
                        print("x y: ", x, y)

                        if (x < self.geometric_monitorized_area[0]
                                and x > self.geometric_monitorized_area[1]
                                and y < self.geometric_monitorized_area[2]
                                and y > self.geometric_monitorized_area[3]):
                            trackers_in_route += 1
                            self.cont = 0
                            print("\n\n\nDentro")
                            distance_to_object = math.sqrt(
                                pow(x, 2) + pow(y, 2))
                            print("Nearest: ", self.nearest_object_in_route)
                            print("distance: ", distance_to_object)
                            if distance_to_object < self.nearest_object_in_route:
                                self.nearest_object_in_route = distance_to_object

                print("Collision: ", self.collision_flag.data)
                print("trackers in route: ", trackers_in_route)
                print("Distance nearest: ", self.nearest_object_in_route)
                if self.collision_flag.data and (
                        trackers_in_route == 0 or
                    (self.nearest_object_in_route > 12 and self.abs_vel < 1)):
                    print("suma A")
                    self.cont += 1
                else:
                    self.cont = 0

                nearest_distance.data = self.nearest_object_in_route

                if (self.trajectory_prediction):

                    collision_id_list = [[], []]

                    # Evaluate collision with dynamic trackers

                    for a in range(dynamic_trackers.shape[1]):
                        for j in range(self.n.shape[0]):
                            e = dynamic_trackers[j][a]
                            color = self.colours[e[5].astype(int) % 32]
                            my_thickness = 2
                            geometric_functions.compute_and_draw(
                                e, color, my_thickness, output_image)

                            if (self.ros):
                                object_type = "trajectory_prediction"
                                monitors_functions.tracker_to_topic(
                                    self, e, object_type, color, j)

                        # Predict possible collision (including the predicted bounding boxes)

                        collision_id, index_bb = monitors_functions.predict_collision(
                            self.ego_predicted, dynamic_trackers[:, a])

                        if collision_id != -1:
                            collision_id_list[0].append(collision_id)
                            collision_id_list[1].append(index_bb)

                    # Evaluate collision with static trackers

                    for b in static_trackers:
                        if b[-1]:  # In route, last element of the array
                            collision_id, index_bb = monitors_functions.predict_collision(
                                self.ego_predicted, b,
                                static=True)  # Predict possible collision
                            if (collision_id != -1):
                                collision_id_list[0].append(collision_id)
                                collision_id_list[1].append(index_bb)

                    #if self.nearest_object_in_route < self.ego_braking_distance:
                    #    self.collision_flag.data = True

                    # Collision

                    if not self.collision_flag.data:
                        if not collision_id_list[0]:  # Empty
                            #if id_nearest == -1:
                            collision_id_list[0].append(
                                -1
                            )  # The ego-vehicle will not collide with any object
                            collision_id_list[1].append(-1)
                            self.collision_flag.data = False
                        elif collision_id_list[0] and (
                                self.nearest_object_in_route <
                                self.ego_braking_distance
                                or self.nearest_object_in_route < 12):
                            #elif id_nearest != -1 and (self.nearest_object_in_route < self.ego_braking_distance or self.nearest_object_in_route < 12):
                            self.collision_flag.data = True
                            self.cont = 0

                    #print("Collision id list: ", collision_id_list)
                    if (len(collision_id_list[0]) > 1):
                        message = 'Predicted collision with objects: ' + str(
                            collision_id_list[0])
                    else:
                        message = 'Predicted collision with object: ' + str(
                            collision_id_list[0])

                    cv2.putText(output_image, message, (30, 140),
                                cv2.FONT_HERSHEY_PLAIN, 1.5, [255, 255, 255],
                                2)  # Predicted collision message
            else:
                print("\033[1;33m" + "No object to track" + '\033[0;m')
                monitors_functions.empty_trackers_list(self)

                if self.collision_flag.data:
                    print("suma B")
                    self.cont += 1
        else:
            print("\033[1;33m" + "No objects detected" + '\033[0;m')
            monitors_functions.empty_trackers_list(self)

            if self.collision_flag.data:
                print("suma C")
                self.cont += 1

        print("cont: ", self.cont)
        if self.cont >= 3:
            self.collision_flag.data = False
            self.nearest_object_in_route = 50000
            nearest_distance.data = float(self.nearest_object_in_route)

        self.end = time.time()

        fps = 1 / (self.end - self.start)

        self.avg_fps += fps
        self.frame_no += 1

        print("SORT time: {}s, fps: {}, avg fps: {}".format(
            round(self.end - self.start, 3), round(fps, 3),
            round(self.avg_fps / self.frame_no, 3)))

        message = 'Trackers: ' + str(len(trackers))
        cv2.putText(output_image, message, (30, 20), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)
        message = 'Dynamic trackers: ' + str(ndt)
        cv2.putText(output_image, message, (30, 50), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)
        try:
            message = 'Static trackers: ' + str(static_trackers.shape[0])
        except AttributeError:  # static_trackers may still be a plain (empty) list
            message = 'Static trackers: ' + str(0)
        cv2.putText(output_image, message, (30, 80), cv2.FONT_HERSHEY_PLAIN,
                    1.5, [255, 255, 255], 2)

        # Publish the list of tracked obstacles and predicted collision

        print("Data: ", nearest_distance.data)
        print("Collision: ", self.collision_flag.data)

        self.pub_bev_sort_tracking_markers_list.publish(
            self.trackers_marker_list)
        self.pub_collision.publish(self.collision_flag)
        self.pub_nearest_object_distance.publish(nearest_distance)

        print("moni: ", len(monitorized_area_colours))
        self.particular_monitorized_area_list = self.mot_tracker.get_particular_monitorized_area_list(
            detections_rosmsg.header.stamp, monitorized_area_colours)
        self.pub_particular_monitorized_area_markers_list.publish(
            self.particular_monitorized_area_list)

        if self.write_video:
            if not self.video_flag:
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                self.output = cv2.VideoWriter(
                    "map-filtered-mot.avi", fourcc, 20,
                    (self.image_width, self.image_height))
                self.video_flag = True  # Only create the video writer once
            self.output.write(output_image)

        # Add a grid to appreciate the obstacles coordinates

        if (self.grid):
            gap = int(
                round(self.view_ahead * (self.image_width / self.real_width)))
            geometric_functions.draw_basic_grid(gap, output_image, pxstep=50)

        if (self.display):
            cv2.imshow("SORT tracking", output_image)
            cv2.waitKey(1)

        self.detections_subscriber = Subscriber(self.detections_topic,
                                                BEV_detections_list)
        self.odom_subscriber = Subscriber(self.odom_topic,
                                          nav_msgs.msg.Odometry)
        self.monitorized_lanes_subscriber = Subscriber(
            self.monitorized_lanes_topic, MonitorizedLanes)

        ts = TimeSynchronizer([
            self.detections_subscriber, self.odom_subscriber,
            self.monitorized_lanes_subscriber
        ], header_synchro)
        ts.registerCallback(self.callback)
    def update(self,dets,types,ego_vehicle_vel,tf_map2lidar,shapes,scale_factor,monitorized_lanes,timer_rosmsg,angle_bb,geometric_monitorized_area):
        """
        Params:
            
            dets - a numpy array of detections (x,y,w,l,theta,beta,score), where x,y are the centroid coordinates (BEV image plane), w and l the
            width and length of the obstacle (BEV image plane), theta (rotation angle), beta (angle between the ego-vehicle centroid and obstacle
            centroid) and the detection score
            
            types - a numpy array with the corresponding type of the detections
            
            ego_vehicle_vel - a float number that represents the absolute velocity of the car in the image
            
        Requires: this method must be called once for each frame even with empty detections.
        Returns a similar array, where the last column is the object ID.

        NOTE: The number of objects returned may differ from the number of detections provided.
        """
        
        # Auxiliary variables

        self.frame_count += 1
        self.particular_monitorized_area_list = []

        trks = np.zeros((len(self.trackers),6))
        to_del = []
        ret = []

        ret_static = []
        ret_dynamic = [] 
        in_route = False
        for i in range(self.n.shape[0]):
            ret_dynamic.append([])
        dyn = 0
            
        ret_type = []
        ret_score = []
        ret_angles = [] 

        # 1. Get predicted locations from existing trackers.
        
        for t,trk in enumerate(trks):
            pos = self.trackers[t].predict()[0]

            trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], 0] 
            if(np.any(np.isnan(pos))):
                to_del.append(t)
        trks = np.ma.compress_rows(np.ma.masked_invalid(trks))

        for t in reversed(to_del):
            self.trackers.pop(t)
        #print("Detections: ", dets, len(dets))
        #print("Trackers: ", trks, len(trks))

        matched, unmatched_dets, unmatched_trks = tracking_functions.associate_detections_to_trackers(dets,trks) # Hungarian algorithm

        #print("Matched: ", matched)
        #print("Unmatched dets: ", unmatched_dets)
        #print("Unmatched trackers: ", unmatched_trks)

        # 2. Update matched trackers with assigned detections and evaluate its position inside the monitorized lanes
        num_trks = len(self.trackers)

        for t,trk in enumerate(self.trackers):
            if(t not in unmatched_trks):
                d = matched[np.where(matched[:,1]==t)[0],0][0]
                ret_type.append(types[d])
                ret_score.append(dets[d,6])
            
                #print("Det theta: ", dets[d,4])
                #print("Tracker theta: ", trk.get_state()[0][4])
                if (abs(dets[d,4] - trk.get_state()[0][4]) > math.pi/2): # If the difference between the matched detection and its associated tracker
                                                                         # in two consecutive frames is greater than pi/2 radians, the detection is probably flipped
                    dets[d,4] = dets[d,4] + math.pi
                    if (dets[d,4] > 1.8*math.pi): # If the tracker orientation is close to 0 and the detection is close to pi, adding pi would give
                                                  # nearly 2*pi (a huge spurious angular velocity), so subtract 2*pi instead
                        dets[d,4] = dets[d,4] - 2*math.pi

                observation_angle = dets[d,5] + dets[d,4] # Beta + Theta (According to KITTI framework)
                angles = np.array([[dets[d,4],observation_angle]])    
                ret_angles.append(angles)  
                
                # Update the space state 
                                                                    
                trk.update(dets[d,:])
                
                # Get the current global position 

                real_world_x, real_world_y, _, _ = geometric_functions.pixels2realworld(self,trk,shapes)

                aux_array = np.zeros((1,9))
                aux_array[0,4] = trk.kf.x[4]
                aux_array[0,7:] = real_world_x, real_world_y

                current_pos = store_global_coordinates(tf_map2lidar,aux_array)

                trk.current_pos[:2,0] = current_pos[:2,0] # x,y global centroid
                trk.current_pos[2,0] = current_pos[4,0] # global orientation

                if self.filter_hdmap:
                    # Check if it is inside the monitorized lanes

                    detection = Node()
                    detection.x = trk.current_pos[0,0]
                    detection.y = -trk.current_pos[1,0]

                    trk_is_inside, trk_in_route, trk_particular_monitorized_area = evaluate_detection_in_monitorized_lanes(detection, monitorized_lanes, types[d])

                    if trk_particular_monitorized_area:
                        self.particular_monitorized_area_list.append(trk_particular_monitorized_area)

                    prediction_in_route = False

                    if trk_is_inside:
                        # Keep the tracker if it is inside the monitorized area (not only the road)

                        trk.off_road = 0

                        # Calculate the global velocities (to predict its position)
                        
                        trk.calculate_global_velocities_and_distance(ego_vehicle_vel,scale_factor,timer_rosmsg)
                        
                        trk.abs_vel = math.sqrt(pow(trk.global_velocities[0,0],2)+pow(trk.global_velocities[1,0],2))
                        #print("Tracker abs vel: ", trk.abs_vel)
                        if (trk.abs_vel > 15):
                            print("\033[1;36m"+"Huge velocity"+'\033[0;m')
                        else:
                            if (trk.abs_vel > 0.5):
                                trk.trajectory_prediction(angle_bb) # Get the predicted bounding box in n seconds 

                                if not trk_in_route:

                                    # Check if the prediction is on the road at this moment

                                    # Get the predicted global position 

                                    predicted_bb = trk.trajectory_prediction_bb[:,-1]
                                    real_world_x, real_world_y, _, _ = geometric_functions.pixels2realworld_prediction(self,predicted_bb,shapes)

                                    aux_array = np.zeros((1,9))
                                    aux_array[0,4] = trk.kf.x[4]
                                    aux_array[0,7:] = real_world_x, real_world_y

                                    current_pos = store_global_coordinates(tf_map2lidar,aux_array)

                                    trk.current_pos[:2,0] = current_pos[:2,0] # x,y global centroid
                                    trk.current_pos[2,0] = current_pos[4,0] # global orientation

                                    # Check if it is inside the monitorized lanes

                                    detection = Node()
                                    detection.x = trk.current_pos[0,0]
                                    detection.y = -trk.current_pos[1,0]

                                    for lane in monitorized_lanes.lanes: 
                                        if len(lane.left.way) >= 2 and (lane.role == "current" or lane.role == "front"):
                                            is_inside, in_road, aux_monitorized_area, dist2centroid = monitors_functions.inside_lane(lane,detection,types[d])

                                            if in_road:
                                                prediction_in_route = True
                                                break
                            else:
                                trk.trajectory_prediction_bb = np.zeros((5,self.n.shape[0])) # If it is static, the prediction buffer must be zero

                        print("IN ROUTE 1: ", trk_in_route)
                        print("IN ROUTE: ", prediction_in_route)
                        if trk_in_route or prediction_in_route:      
                                   
                            trk.in_route = 1 # In the road, current lane or next lane             
                    else:
                        trk.off_route += 1
                else:
                    trk.calculate_global_velocities_and_distance(ego_vehicle_vel,scale_factor,timer_rosmsg)

                    x = real_world_x
                    y = real_world_y

                    print("goemetric area: ", geometric_monitorized_area)
                    print("x y: ", x,y)
                    if (x < geometric_monitorized_area[0] and x > geometric_monitorized_area[1]
                        and y < geometric_monitorized_area[2] and y > geometric_monitorized_area[3]):
                        print("in route geometric")
                        trk.in_route = 1
                    else:
                        trk.in_route = 0
                        
                    trk.abs_vel = math.sqrt(pow(trk.global_velocities[0,0],2)+pow(trk.global_velocities[1,0],2))
                    #print("Tracker abs vel: ", trk.abs_vel)
                    if (trk.abs_vel > 15):
                        print("\033[1;36m"+"Huge velocity"+'\033[0;m')
                    else:
                        if (trk.abs_vel > 1.0):
                            trk.trajectory_prediction(angle_bb) # Get the predicted bounding box in n seconds 
                        else:
                            trk.trajectory_prediction_bb = np.zeros((5,self.n.shape[0])) # If it is static, the prediction buffer must be zero

        # 3. Create and initialise new trackers for unmatched detections (if these detections are inside a monitorized_lane)

        for i in unmatched_dets:
            aux = store_global_coordinates(tf_map2lidar,dets[i,:].reshape(1,9))
            type_object = types[i]

            if self.filter_hdmap:
                detection = Node()
                detection.x = aux[0,0]
                detection.y = -aux[1,0] # N.B. In CARLA this coordinate is the opposite

                is_inside, in_route, particular_monitorized_area = evaluate_detection_in_monitorized_lanes(detection,monitorized_lanes, type_object)

                if is_inside: # Create new tracker 
                    trk = tracking_functions.KalmanBoxTracker(dets[i,:],timer_rosmsg)
                    trk.current_pos[:2,0] = aux[:2,0] # x,y global centroid
                    trk.current_pos[2,0] = aux[4,0] # global orientation
                    trk.particular_monitorized_area = particular_monitorized_area

                    if in_route:
                        trk.in_route = 1

                    self.trackers.append(trk)

                if particular_monitorized_area:
                    self.particular_monitorized_area_list.append(particular_monitorized_area)

            else:
                #print("Create new tracker")
                trk = tracking_functions.KalmanBoxTracker(dets[i,:],timer_rosmsg)
                trk.current_pos[:2,0] = aux[:2,0] # x,y global centroid
                trk.current_pos[2,0] = aux[4,0] # global orientation 

                x = trk.current_pos[0,0]
                y = trk.current_pos[1,0]

                if (x < geometric_monitorized_area[0] and x > geometric_monitorized_area[1]
                    and y < geometric_monitorized_area[2] and y > geometric_monitorized_area[3]):
                    trk.in_route = 1
                else:
                    trk.in_route = 0

                self.trackers.append(trk)  

        i = len(self.trackers)
        print("Number of trackers: ", i)
        # 4. Store relevant trackers in lists

        for t,trk in enumerate(self.trackers):
            if(t not in unmatched_trks):  
                d = trk.get_state()[0] # Predicted state in next frame
            if((trk.time_since_update < 1) and (trk.hit_streak >= self.min_hits or self.frame_count <= self.min_hits)):
                # id+1 as MOT benchmark requires positive 
                ret.append(np.concatenate((d,[trk.id+1],[trk.in_route])).reshape(1,-1)) 

                # Distinguish between dynamic and static obstacles

                if self.trajectory_prediction: 
                    for k in range(self.n.shape[0]):
                        e = trk.get_trajectory_prediction_bb(k)[0] # Predicted bounding box position
                        all_zeros = not np.any(e) # True if all elements of the array are equal to 0

                        if not all_zeros: # Only append if there are predicted bbs for the current tracker
                            ret_dynamic[k].append(np.concatenate((e,[trk.id+1])).reshape(1,-1))
                        else: 
                            ret_static.append(np.concatenate((d,[trk.id+1],[trk.in_route])).reshape(1,-1))
                            break
            i -= 1
            
            # Remove dead tracklet
            
            if(trk.time_since_update > self.max_age or trk.off_route > self.max_age):
                self.trackers.pop(i)
   
        dyn = len(ret_dynamic[0])

        if not ret_static: # The list is empty
            ret_static.append([]) # The list needs at least one element so it can be converted to a np.array
   
        if(len(ret)>0 and self.frame_count > 1):       
            a = np.concatenate(ret)   
            b = np.array(ret_type)   
            c = np.array(ret_score)  
            d = np.concatenate(ret_angles)

            try:
                e = np.concatenate(ret_dynamic).reshape(self.n.shape[0],dyn,6)
            except ValueError:
                e = np.empty((0,0))
            try:
                f = np.concatenate(ret_static)
            except ValueError:
                f = np.empty((0,0))
       
            return a,b,c,d,e,f     
        return np.empty((0,6)),np.empty((0,6)),np.empty((0,6)),np.empty((0,6)),np.empty((0,6)),np.empty((0,6))
    def SmartMOT_callback(self, detections_rosmsg, odom_rosmsg,
                          monitorized_lanes_rosmsg):
        """
        """
        # print(">>>>>>>>>>>>>>>>>>")
        # print("Detections: ", detections_rosmsg.header.stamp.to_sec())
        # print("Odom: ", odom_rosmsg.header.stamp.to_sec())
        # print("Lanes: ", monitorized_lanes_rosmsg.header.stamp.to_sec())

        try:
            # Get the latest available map -> LiDAR transform (rospy.Time(0))
            (translation, quaternion) = self.listener.lookupTransform(
                self.map_frame, self.lidar_frame, rospy.Time(0))
            rot_matrix = tf.transformations.quaternion_matrix(quaternion)

            self.tf_map2lidar = rot_matrix
            self.tf_map2lidar[:3, 3] = self.tf_map2lidar[:3, 3] + translation  # This matrix transforms local to global coordinates

        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            print("\033[1;33m" + "TF exception" + '\033[0;m')

            self.ts.registerCallback(self.SmartMOT_callback)

        self.start = time.time()

        # Initialize the scene

        if not self.init_scene:
            self.real_height = detections_rosmsg.front - detections_rosmsg.back  # Grid height (m)
            self.real_front = detections_rosmsg.front + self.view_ahead  # To study obstacles (view_ahead) meters ahead
            self.real_width = -detections_rosmsg.left + detections_rosmsg.right  # Grid width (m)
            self.real_left = -detections_rosmsg.left

            r = float(self.image_height) / self.real_height

            self.image_width = int(round(self.real_width *
                                         r))  # Grid width (pixels)
            self.image_front = int(round(self.real_front * r))
            self.image_left = int(round(self.real_left * r))

            print("Real width: ", self.real_width)
            print("Real height: ", self.real_height)
            print("Image width: ", self.image_width)
            print("Image height: ", self.image_height)

            self.shapes = (self.real_front, self.real_left, self.image_front,
                           self.image_left)
            self.scale_factor = (self.image_height / self.real_height,
                                 self.image_width / self.real_width)

            # Our centroid is defined in BEV camera coordinates but centered in the ego-vehicle.
            # We need to translate it to the top-left corner (required by the SORT algorithm)

            self.tf_bevtl2bevcenter_m = np.array(
                [[1.0, 0.0, 0.0, self.shapes[1]],
                 [0.0, 1.0, 0.0, self.shapes[0]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # From BEV centered to BEV top-left (in pixels)

            self.tf_bevtl2bevcenter_px = np.array(
                [[1.0, 0.0, 0.0, -self.shapes[3]],
                 [0.0, 1.0, 0.0, -self.shapes[2]], [0.0, 0.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 1.0]])

            # Rotation matrix from LiDAR frame to BEV frame (to convert to global coordinates)

            self.tf_lidar2bev = np.array([[0.0, -1.0, 0.0, 0.0],
                                          [-1.0, 0.0, 0.0, 0.0],
                                          [0.0, 0.0, -1.0, 0.0],
                                          [0.0, 0.0, 0.0, 1.0]])

            # Initialize the regression model to calculate the braking distance

            self.velocity_braking_distance_model = monitors_functions.fit_velocity_braking_distance_model(
            )

            # Tracker

            self.mot_tracker = sort_functions.Sort(max_age, min_hits, n,
                                                   self.shapes,
                                                   self.trajectory_prediction,
                                                   self.filter_hdmap)

            self.init_scene = True

        output_image = np.ones(
            (self.image_height, self.image_width, 3),
            dtype="uint8")  # Black image to represent the scene

        # print("----------------")

        # Initialize ROS and monitors variables

        self.trackers_marker_list = visualization_msgs.msg.MarkerArray(
        )  # Initialize trackers list
        monitorized_area_colours = []

        trackers_in_route = 0
        nearest_distance = std_msgs.msg.Float64()
        nearest_distance.data = float(self.nearest_object_in_route)

        world_features = []
        trackers = []
        dynamic_trackers = []
        static_trackers = []
        ndt = 0  # Number of dynamic trackers

        timer_rosmsg = detections_rosmsg.header.stamp.to_sec()

        # Predict the ego-vehicle trajectory

        for lane in monitorized_lanes_rosmsg.lanes:
            if (lane.role == "current" and len(lane.left.way) >= 2):
                monitors_functions.new_ego_vehicle_prediction(
                    self, odom_rosmsg, lane)
        #monitors_functions.ego_vehicle_prediction(self,odom_rosmsg)
        #print("Ego vehicle braking distance: ", float(self.ego_braking_distance))

        # Convert input data to bboxes to perform Multi-Object Tracking

        # print("\nNumer of total detections: ", len(detections_rosmsg.bev_detections_list))
        bboxes_features, types = sort_functions.bbox_to_xywh_cls_conf(
            self, detections_rosmsg, output_image)
        # print("Number of relevant detections: ", len(bboxes_features)) # score > detection_threshold

        # Emergency brake (detection only). BEV_LiDAR frame, not BEV_Camera frame

        object_in_route = False
        dist = 99999

        if not self.collision_flag.data:
            for bbox, type_object in zip(bboxes_features, types):
                for lane in monitorized_lanes_rosmsg.lanes:
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        object_in_route = True
                        detection = Node()
                        bbox = bbox.reshape(1, -1)
                        detection.x = bbox[0, 0]
                        detection.y = -bbox[0, 1]  # N.B. In OpenDrive this coordinate is the opposite

                        in_polygon, in_road, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(
                            lane, detection, type_object)

                        if in_polygon or in_road:
                            #distance_to_object = monitors_functions.calculate_distance_to_nearest_object_inside_route(monitorized_lanes_rosmsg,global_pos)
                            # print("In route")
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y
                            distance_to_object = math.sqrt(
                                pow(ego_x_global - detection.x, 2) +
                                pow(ego_y_global - detection.y, 2))
                            # print("Distance to object: ", distance_to_object)
                            # print("Self nearest: ", self.nearest_object_in_route)
                            if distance_to_object < self.nearest_object_in_route:
                                # print("Update distance")
                                self.nearest_object_in_route = distance_to_object
                                nearest_distance.data = float(
                                    self.nearest_object_in_route)
                            # print("Braking distance: ", self.ego_braking_distance)

                            if distance_to_object < self.ego_braking_distance:
                                # print("Break")
                                self.collision_flag.data = True
                                self.pub_collision.publish(self.collision_flag)
                                self.cont = 0
                                break
                else:
                    continue
                break
            if not object_in_route:
                self.collision_flag.data = False
                nearest_distance.data = float(50000)
        else:
            for bbox, type_object in zip(bboxes_features, types):
                for lane in monitorized_lanes_rosmsg.lanes:
                    if (lane.role == "current" and len(lane.left.way) >= 2):
                        detection = Node()
                        bbox = bbox.reshape(1, -1)
                        detection.x = bbox[0, 0]
                        detection.y = -bbox[0, 1]  # N.B. In OpenDrive this coordinate is the opposite

                        in_polygon, in_road, particular_monitorized_area, dist2centroid = monitors_functions.inside_lane(
                            lane, detection, type_object)

                        if in_polygon or in_road:
                            # print("In route after")
                            ego_x_global = odom_rosmsg.pose.pose.position.x
                            ego_y_global = -odom_rosmsg.pose.pose.position.y
                            aux = math.sqrt(
                                pow(ego_x_global - detection.x, 2) +
                                pow(ego_y_global - detection.y, 2))
                            if aux < dist:
                                dist = aux
                        break
            nearest_distance.data = dist
            if dist > 5:
                self.cont += 1
            else:
                self.cont = 0
            # print("distance to object after collision: ", dist)
            # print("cont: ", self.cont)
        if self.cont >= 3:
            self.collision_flag.data = False
        # print("Nearest object distance: ", nearest_distance.data)
        # print("Collision: ", self.collision_flag.data)
        self.pub_nearest_object_distance.publish(nearest_distance)
        self.pub_collision.publish(self.collision_flag)
        """