Example #1
def get_track_box(annotation: Dict[str, Any],
                  center_coordinates: Tuple[float, float],
                  center_pixels: Tuple[float, float],
                  resolution: float = 0.1) -> np.ndarray:
    """
    Get four corners of bounding box for agent in pixels.
    :param annotation: The annotation record of the agent.
    :param center_coordinates: (x, y) coordinates in global frame
        of the center of the image.
    :param center_pixels: (row_index, column_index) location of the center
        of the image in pixel coordinates.
    :param resolution: Resolution of the image in meters / pixel.
    :return: Four corners of the agent's bounding box in pixel coordinates.
    """

    assert resolution > 0

    location = annotation['translation'][:2]
    yaw_in_radians = quaternion_yaw(Quaternion(annotation['rotation']))

    row_pixel, column_pixel = convert_to_pixel_coords(location,
                                                      center_coordinates,
                                                      center_pixels,
                                                      resolution)

    width = annotation['size'][0] / resolution
    length = annotation['size'][1] / resolution

    # Width and length are switched here so that we can draw them along the x-axis as
    # opposed to the y. This makes rotation easier.
    return pixels_to_box_corners(row_pixel, column_pixel, length, width,
                                 yaw_in_radians)
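
Every snippet on this page routes global (x, y) locations through convert_to_pixel_coords from the nuScenes prediction utilities. The sketch below reconstructs the mapping from how the helper is called in these examples (image center in global coordinates, image center as (row, column) pixels, resolution in meters per pixel); it is a minimal sketch, and the devkit's actual implementation may differ in detail.

def convert_to_pixel_coords(location, center_of_image_in_global,
                            center_of_image_in_pixels, resolution=0.1):
    """Sketch: map a global (x, y) location to (row, column) pixel indices."""
    x_offset = location[0] - center_of_image_in_global[0]
    y_offset = location[1] - center_of_image_in_global[1]
    # Rows grow downward in an image, so a larger global y gives a smaller row index.
    row_pixel = int(center_of_image_in_pixels[0] - y_offset / resolution)
    column_pixel = int(center_of_image_in_pixels[1] + x_offset / resolution)
    return row_pixel, column_pixel
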
Example #2
def get_intermediate_rep(BEV_points, mask, semantic_class_points,
                         center_coordinates):
    """Rasterize labeled BEV points into 256 x 256 road, lane, and obstacle
    count grids at 0.5 m / pixel, centered at center_coordinates."""
    corresponding_BEV_points = BEV_points[:, mask]

    num_points = corresponding_BEV_points.shape[1]
    lane_rep = np.zeros((256, 256))
    road_rep = np.zeros((256, 256))
    obstacle_rep = np.zeros((256, 256))
    res = 0.5  # meters per pixel
    for i in range(num_points):
        row_pixel, column_pixel = convert_to_pixel_coords(
            corresponding_BEV_points[:, i][:2], center_coordinates, (128, 128), res)
        # Skip points that fall outside the 256 x 256 raster.
        if row_pixel < 0 or column_pixel < 0 or row_pixel >= 256 or column_pixel >= 256:
            continue
        semantic_class = semantic_class_points[i]
        if semantic_class == class_to_idx['Road']:
            road_rep[row_pixel][column_pixel] += 1

        if semantic_class == class_to_idx[
                'Lane Marking - General'] or semantic_class == class_to_idx[
                    'Lane Marking - Crosswalk']:
            lane_rep[row_pixel][column_pixel] += 1

        if semantic_class == class_to_idx[
                'Building'] or semantic_class == class_to_idx[
                    'Curb'] or semantic_class == class_to_idx['Vegetation']:
            obstacle_rep[row_pixel][column_pixel] += 1

    return road_rep, lane_rep, obstacle_rep
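
class_to_idx is defined elsewhere in this project; the label names ('Road', 'Lane Marking - General', 'Lane Marking - Crosswalk', 'Building', 'Curb', 'Vegetation') suggest a Mapillary Vistas-style label set. A hypothetical stand-in with made-up indices is enough to exercise the function:

# Hypothetical mapping; the real indices come from the project's label definitions.
class_to_idx = {
    'Road': 0,
    'Lane Marking - General': 1,
    'Lane Marking - Crosswalk': 2,
    'Building': 3,
    'Curb': 4,
    'Vegetation': 5,
}

# BEV_points: D x N point array (first two rows are x and y),
# semantic_class_points: one label index per point.
# road_rep, lane_rep, obstacle_rep = get_intermediate_rep(
#     BEV_points, mask, semantic_class_points, center_coordinates)
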
Example #3
    def make_trans_img(self):
        buffer = max([
            self.meters_ahead, self.meters_behind, self.meters_left,
            self.meters_right
        ]) * 2
        image_side_length = int(buffer / self.resolution)
        central_track_pixels = (image_side_length / 2, image_side_length / 2)

        self.trans_image = np.zeros((image_side_length, image_side_length, 3))

        # Store the global (x, y) each pixel maps to in channels 0 and 1.
        for x in np.arange(-59.9, 59.9, self.resolution):
            for y in np.arange(-59.9, 59.9, self.resolution):
                loc = (self.agent_pos[0] + x, self.agent_pos[1] + y)
                pix_pos = convert_to_pixel_coords(loc, self.agent_pos,
                                                  central_track_pixels,
                                                  self.resolution)
                self.trans_image[pix_pos[0], pix_pos[1], 0] = loc[0]
                self.trans_image[pix_pos[0], pix_pos[1], 1] = loc[1]

        rotated_image = cv2.warpAffine(
            self.trans_image, self.rotation_mat,
            (self.trans_image.shape[1], self.trans_image.shape[0]))

        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind,
                                       self.meters_left, self.meters_right,
                                       self.resolution, image_side_length)

        self.trans_image = rotated_image[row_crop, col_crop]

        return self.trans_image
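
After make_trans_img runs, channels 0 and 1 of every pixel hold the global x and y that the cell was rasterized from, so the returned image doubles as a lookup table from raster cells back to global coordinates (cv2.warpAffine interpolates, so values are only approximate after rotation). Assuming rasterizer is an instance of the class above:

# Recover the (approximate) global coordinates stored at raster cell (row, col).
trans_image = rasterizer.make_trans_img()
global_x = trans_image[row, col, 0]
global_y = trans_image[row, col, 1]
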
Example #4
def populate_agent_states(center_agent_annotation: Dict[str, Any],
                          center_agent_pixels: Tuple[float, float],
                          annotations: List[Dict[str, Any]],
                          base_image: np.ndarray,
                          helper: PredictHelper,
                          resolution: float = 0.1) -> None:
    """
    Adds agent states to 4 channel base_image
    :param center_agent_annotation: Annotation record for the agent
        that is in the center of the image.
    :param center_agent_pixels: Pixel location of the agent in the
        center of the image.
    :param annotations: Annotation records for other agents
    :param base_image: 4 channel image to populate with agent states.
    :param helper: Predict helper
    :param resolution: Resolution of the image in meters / pixel.
    :return: None.
    """

    agent_x, agent_y = center_agent_annotation['translation'][:2]

    for i, annotation in enumerate(annotations):
        # Skip the agent at the center of the image.
        if annotation['instance_token'] != center_agent_annotation['instance_token']:
            location = annotation['translation'][:2]
            row_pixel, column_pixel = convert_to_pixel_coords(
                location, (agent_x, agent_y), center_agent_pixels, resolution)

            # Only populate pixels that fall inside the image bounds.
            if (0 <= row_pixel < base_image.shape[0]
                    and 0 <= column_pixel < base_image.shape[1]):

                v = helper.get_velocity_for_agent(annotation['instance_token'],
                                                  annotation['sample_token'])

                a = helper.get_acceleration_for_agent(
                    annotation['instance_token'], annotation['sample_token'])

                omega = helper.get_heading_change_rate_for_agent(
                    annotation['instance_token'], annotation['sample_token'])

                # Channel 0 counts agents per pixel; channels 1-3 keep the
                # smallest speed, acceleration, and heading change rate seen.
                if base_image[row_pixel, column_pixel, 0] == 0:
                    base_image[row_pixel, column_pixel, 0] = 1
                    if not np.isnan(v):
                        base_image[row_pixel, column_pixel, 1] = v
                    if not np.isnan(a):
                        base_image[row_pixel, column_pixel, 2] = a
                    if not np.isnan(omega):
                        base_image[row_pixel, column_pixel, 3] = omega

                else:
                    base_image[row_pixel, column_pixel, 0] += 1
                    if not np.isnan(v):
                        base_image[row_pixel, column_pixel, 1] = min(
                            v, base_image[row_pixel, column_pixel, 1])
                    if not np.isnan(a):
                        base_image[row_pixel, column_pixel, 2] = min(
                            a, base_image[row_pixel, column_pixel, 2])
                    if not np.isnan(omega):
                        base_image[row_pixel, column_pixel, 3] = min(
                            omega, base_image[row_pixel, column_pixel, 3])
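
A minimal sketch of how populate_agent_states might be driven, assuming a 0.1 m/pixel raster and a nuScenes PredictHelper; the side length, annotation list, and helper below are placeholders:

# Hypothetical driver code.
image_side_length = 500  # e.g. 50 m at 0.1 m per pixel
base_image = np.zeros((image_side_length, image_side_length, 4))
center_pixels = (image_side_length // 2, image_side_length // 2)

populate_agent_states(center_agent_annotation, center_pixels,
                      annotations, base_image, helper, resolution=0.1)
# Per pixel, base_image now holds: agent count, speed, acceleration,
# and heading change rate of the agent(s) occupying that cell.
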
Example #5
def draw_lanes_on_image(
    image: np.ndarray,
    lanes: Dict[str, List[Tuple[float, float, float]]],
    agent_global_coords: Tuple[float, float],
    agent_yaw_in_radians: float,
    agent_pixels: Tuple[int, int],
    resolution: float,
    color_function: Callable[[float, float],
                             Color] = color_by_yaw) -> np.ndarray:
    """
    Draws lanes on image.
    :param image: Image to draw lanes on. Preferably all-black or all-white image.
    :param lanes: Mapping from lane id to list of coordinate tuples in global coordinate system.
    :param agent_global_coords: Location of the agent in the global coordinate frame.
    :param agent_yaw_in_radians: Yaw of agent in radians.
    :param agent_pixels: Location of the agent in the image as (row_pixel, column_pixel).
    :param resolution: Resolution in meters/pixel.
    :param color_function: By default, lanes are colored by the yaw difference between the pose
    on the lane and the agent yaw. However, you can supply your own function to color the lanes.
    :return: Image (represented as np.ndarray) with lanes drawn.
    """

    for poses_along_lane in lanes.values():

        for start_pose, end_pose in zip(poses_along_lane[:-1],
                                        poses_along_lane[1:]):

            start_pixels = convert_to_pixel_coords(start_pose[:2],
                                                   agent_global_coords,
                                                   agent_pixels, resolution)
            end_pixels = convert_to_pixel_coords(end_pose[:2],
                                                 agent_global_coords,
                                                 agent_pixels, resolution)

            # Flip the row and column coordinates because cv2 expects points
            # as (x, y), i.e. (column, row).
            start_pixels = (start_pixels[1], start_pixels[0])
            end_pixels = (end_pixels[1], end_pixels[0])

            color = color_function(agent_yaw_in_radians, start_pose[2])

            cv2.line(image, start_pixels, end_pixels, color, thickness=5)

    return image
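
The color_function argument makes the lane coloring pluggable: anything matching Callable[[float, float], Color] works. A hypothetical replacement that fades from green (lane parallel to the agent) to red (lane opposing the agent):

import numpy as np

def color_by_alignment(agent_yaw_in_radians: float,
                       lane_yaw_in_radians: float):
    """Hypothetical color function based on yaw alignment."""
    diff = abs(agent_yaw_in_radians - lane_yaw_in_radians) % (2 * np.pi)
    diff = min(diff, 2 * np.pi - diff)  # wrap to [0, pi]
    alignment = 1.0 - diff / np.pi      # 1 when parallel, 0 when opposing
    return (int(255 * (1 - alignment)), int(255 * alignment), 0)

# image = draw_lanes_on_image(image, lanes, agent_global_coords, agent_yaw,
#                             agent_pixels, resolution,
#                             color_function=color_by_alignment)
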
Example #6
            yaw = quaternion_yaw(Quaternion(needed_ego_pose['rotation']))
            rotation_matrix = np.array([[np.cos(yaw), -np.sin(yaw)],
                                        [np.sin(yaw), np.cos(yaw)]]).T
            ref_ego_x, ref_ego_y = needed_ego_pose['translation'][:2]

            for pc_h, gpc_orig, ps_h, ep_h, m, s_c_p, ims, token in time_frame:

                gpc_h = copy.deepcopy(gpc_orig)

                # Get the target vehicle representation.
                location = ep_h['translation'][:2]
                print(ref_ego_x == location[0] and ref_ego_y == location[1])

                row_pixel, column_pixel = convert_to_pixel_coords(location,
                                                                  (ref_ego_x, ref_ego_y),
                                                                  (128, 128), res)
                target_rep = np.zeros((256, 256, 3))
                width_target = 1
                length_target = 1
                yaw_in_radians = quaternion_yaw(Quaternion(ep_h['rotation']))
                box = pixels_to_box_corners(row_pixel, column_pixel,
                                            length_target, width_target,
                                            yaw_in_radians)
                cv2.fillPoly(target_rep, pts=[np.int0(box)], color=1)
                rotation_mat = get_rotation_matrix(target_rep.shape,
                                                   yaw + np.pi / 2)
                target_rep = cv2.warpAffine(
                    target_rep, rotation_mat,
                    (target_rep.shape[1], target_rep.shape[0]))
                row_crop, col_crop = get_crops(offset, offset, offset, offset,
Example #7
    def generate_virtual_mask(self, translation, rotation, lanes, sample_token: str, show_agent=True,
                              past_trj_len=4, future_trj_len=6, min_dist=6):
        buffer = max([self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right]) * 2
        image_side_length = int(buffer / self.resolution)
        central_track_pixels = (image_side_length / 2, image_side_length / 2)

        base_image = np.zeros((image_side_length, image_side_length, 3))

        translation_xy = []
        past_xy = []
        future_xy = []

        for lane in lanes:
            length = len(lane)

            if length < (past_trj_len + future_trj_len + 1):
                continue

            location = None
            current_index = None
            for _ in range(5):
                is_collide = False
                if (length - past_trj_len - future_trj_len - 1) < 1:
                    continue
                # start_index = np.random.randint(low=0, high=(length - past_trj_len - future_trj_len - 1))
                start_index = 0
                max_gap = (length - start_index) // (past_trj_len + future_trj_len + 1)
                if max_gap < 2:
                    continue
                # gap = np.random.randint(low=1, high=max_gap+1)
                gap = max_gap
                # current_index = start_index + (gap * past_trj_len)
                # location_ = lane[current_index, :2]

                mask = np.arange(start_index, length, step=gap)
                lane_ = lane[mask]

                current_index = past_trj_len
                location_ = lane_[current_index, :2]

                # check collision
                for xy in translation_xy:
                    diff = location_ - xy
                    if np.linalg.norm(diff) < min_dist:
                        is_collide = True
                        break
                if not is_collide:
                    location = location_
                    lane = lane_
                    break

            if location is None:
                continue

            translation_xy.append(location)
            past_xy.append(lane[current_index - past_trj_len:current_index])
            future_xy.append(lane[current_index + 1:current_index + future_trj_len + 1])

            # draw virtual agent mask
            if show_agent:
                agent_x, agent_y = translation[:2]

                ax, ay = lane[current_index, 0], lane[current_index, 1]
                bx, by = lane[current_index + 1, 0], lane[current_index + 1, 1]
                # Heading of the lane at this point, from consecutive lane points.
                rot = np.arctan2(by - ay, bx - ax)

                yaw_in_radians = quaternion_yaw(Quaternion(axis=[0, 0, 1], angle=rot))
                row_pixel, column_pixel = convert_to_pixel_coords(
                    location, (agent_x, agent_y), central_track_pixels, self.resolution)
                size = [2.2, 5.6, 2.1]  # virtual agent (width, length, height) in meters
                width = size[0] / self.resolution
                length = size[1] / self.resolution
                box = pixels_to_box_corners(row_pixel, column_pixel, length, width, yaw_in_radians)
                color = (255, 255, 0)
                cv2.fillPoly(base_image, pts=[np.int0(box)], color=color)

        xy_global = [np.asarray(past_xy), np.asarray(future_xy), np.asarray(translation_xy)]

        center_agent_yaw = quaternion_yaw(Quaternion(rotation))
        rotation_mat = get_rotation_matrix(base_image.shape, center_agent_yaw)

        rotated_image = cv2.warpAffine(base_image, rotation_mat, (base_image.shape[1], base_image.shape[0]))

        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind,
                                       self.meters_left, self.meters_right, self.resolution,
                                       image_side_length)

        return rotated_image[row_crop, col_crop].astype('uint8'), xy_global
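
Examples #1, #6, and #7 all pass the (row, column) result to pixels_to_box_corners before rasterizing the box with cv2.fillPoly. A rough sketch of that step, assuming it is a thin wrapper around cv2.boxPoints (the actual devkit helper may differ):

import cv2
import numpy as np

def pixels_to_box_corners_sketch(row_pixel, column_pixel,
                                 length_in_pixels, width_in_pixels,
                                 yaw_in_radians):
    """Return the four corners of a rotated box centered at (row, column)."""
    # cv2 works in (x, y) = (column, row) order and expects the angle in
    # degrees, measured clockwise in image coordinates, hence the sign flip.
    center = (float(column_pixel), float(row_pixel))
    size = (length_in_pixels, width_in_pixels)
    return cv2.boxPoints((center, size, -np.degrees(yaw_in_radians)))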