def extract_egocentric_costmap(costmap_2d, ego_position_in_world,
                               resulting_origin=None,
                               resulting_size=None, border_value=0):
    """
    Returns the costmap as seen by a robot at ego_position_in_world.
    In this costmap the robot is at (0, 0) with angle 0.

    :param costmap_2d: global costmap
    :param ego_position_in_world: robot's position in the world costmap
    :param resulting_origin: if given, shift and crop the result so that its
        origin (relative to the robot) equals this value
    :param resulting_size: if given, shift and crop the result so that its
        world size equals this value
    :param border_value: value used to fill pixels that fall outside the original costmap
    :return: returns the egocentric costmap, with the robot in the middle.
    """
    ego_pose = np.asarray(ego_position_in_world)
    pixel_origin = costmap_2d.world_to_pixel(np.array(ego_pose[:2], dtype=np.float64))

    transform = cv2.getRotationMatrix2D(tuple(pixel_origin), 180 * ego_pose[2] / np.pi, scale=1)

    if resulting_size is None:
        resulting_size = costmap_2d.get_data().shape[:2][::-1]
    else:
        resulting_size = tuple(world_to_pixel(np.array(resulting_size, dtype=np.float64), np.array((0., 0.)), costmap_2d.get_resolution()))

    if resulting_origin is not None:
        resulting_origin = np.asarray(resulting_origin)
        assert resulting_origin.shape[0] == 2
        delta_shift = resulting_origin - (costmap_2d.get_origin() - ego_pose[:2])
        delta_shift_pixel = tuple(world_to_pixel(delta_shift, np.array((0., 0.)), costmap_2d.get_resolution()))
        shift_matrix = np.float32([[1, 0, -delta_shift_pixel[0]], [0, 1, -delta_shift_pixel[1]]])

        def _compose_affine_transforms(t1, t2):
            # http://stackoverflow.com/questions/13557066/built-in-function-to-combine-affine-transforms-in-opencv
            t1_expanded = np.array((t1[0], t1[1], (0, 0, 1)), dtype=np.float32)
            t2_expanded = np.array((t2[0], t2[1], (0, 0, 1)), dtype=np.float32)
            combined = np.dot(t2_expanded, t1_expanded)
            return combined[:2, :]

        transform = _compose_affine_transforms(transform, shift_matrix)
    else:
        resulting_origin = costmap_2d.get_origin() - ego_pose[:2]

    with single_threaded_opencv():
        rotated_data = cv2.warpAffine(costmap_2d.get_data(), transform, resulting_size,
                                      # this mode doesn't change the value of pixels during rotation
                                      flags=cv2.INTER_NEAREST, borderValue=border_value)

    return CostMap2D(rotated_data, costmap_2d.get_resolution(),
                     origin=resulting_origin)
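
A minimal usage sketch for extract_egocentric_costmap (not part of the original source): the map contents, robot pose, and window size below are made-up values, and CostMap2D is assumed to be importable from the same module.

# Illustrative sketch only -- map contents, pose and window size are assumptions.
import numpy as np

# 10 m x 10 m empty world map at 5 cm per pixel, origin at (0, 0).
world_map = CostMap2D(np.zeros((200, 200), dtype=np.uint8), 0.05, np.array([0., 0.]))

# Robot at (4, 5) facing 90 degrees; cut out a 3 m x 4 m window around it,
# with the robot 1 m from the left edge and 2 m from the bottom edge.
robot_pose = np.array([4.0, 5.0, np.pi / 2])
ego_map = extract_egocentric_costmap(world_map, robot_pose,
                                     resulting_origin=np.array([-1.0, -2.0]),
                                     resulting_size=np.array([3.0, 4.0]))
print(ego_map.get_data().shape)  # approximately (80, 60) pixels at 0.05 m/pixel
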
def pose_collides(pose, footprint, costmap_data, origin, resolution):
    """
    Check if robot footprint at x, y (world coordinates) and
        oriented as yaw collides with lethal obstacles.
    :param pose array(3)[float]: (x, y, angle) of the robot
    :param footprint array(N, 2)[float]: footprint polygon
    :param costmap_data array(H, W)[uint8]: costmap data with obstacles
    :param origin array(2)[float]: (x, y) origin of the costmap
    :param resolution float: resolution of the costmap
    :return bool: whether the robot collides or not
    """
    kernel_image = get_pixel_footprint(pose[2], footprint, resolution)
    # Get the coordinates of where the footprint is inside the kernel_image (on pixel coordinates)
    kernel = np.where(kernel_image)
    # Move footprint to (x,y), all in pixel coordinates
    x, y = world_to_pixel(pose[:2], origin, resolution)
    collisions = y + kernel[0] - kernel_image.shape[0] // 2, x + kernel[1] - kernel_image.shape[1] // 2

    # Check if the footprint pixel coordinates are valid, this is, if they are not negative and are inside the map
    good = np.logical_and(np.logical_and(collisions[0] >= 0, collisions[0] < costmap_data.shape[0]),
                          np.logical_and(collisions[1] >= 0, collisions[1] < costmap_data.shape[1]))

    # Just from the footprint coordinates that are good, check if they collide
    # with obstacles inside the map
    return bool(np.any(costmap_data[collisions[0][good],
                                    collisions[1][good]] == CostMap2D.LETHAL_OBSTACLE))
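
A hedged usage sketch for pose_collides above (not from the original source); the footprint polygon and obstacle placement are assumptions chosen for illustration.

# Illustrative sketch only -- footprint and obstacle placement are assumptions.
import numpy as np

resolution = 0.05
origin = np.array([0., 0.])
costmap_data = np.zeros((200, 200), dtype=np.uint8)          # empty 10 m x 10 m map
costmap_data[100:120, 100:120] = CostMap2D.LETHAL_OBSTACLE   # ~1 m x 1 m obstacle near (5, 5)

# 0.6 m x 0.4 m rectangular footprint centered on the robot.
footprint = np.array([[-0.3, -0.2], [0.3, -0.2], [0.3, 0.2], [-0.3, 0.2]])

print(pose_collides(np.array([2.0, 2.0, 0.0]), footprint, costmap_data, origin, resolution))  # expected: False
print(pose_collides(np.array([5.2, 5.2, 0.0]), footprint, costmap_data, origin, resolution))  # expected: True
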
Example #3
def pose_collides(x, y, angle, robot, costmap):
    """
    Check if robot footprint at x, y (world coordinates) and
        oriented as yaw collides with lethal obstacles.
    :param x: robot x position in world coordinates
    :param y: robot y position in world coordinates
    :param angle: robot orientation (yaw) in radians
    :param robot: Robot that will supply the footprint
    :param costmap CostMap2D: costmap containing the obstacles to collide with
    :return bool: does the pose collide?
    """
    kernel_image = get_pixel_footprint(angle, robot.get_footprint(), costmap.get_resolution())
    # Get the coordinates of where the footprint is inside the kernel_image (on pixel coordinates)
    kernel = np.where(kernel_image)
    # Move footprint to (x,y), all in pixel coordinates
    x, y = world_to_pixel(np.array([x, y]), costmap.get_origin(), costmap.get_resolution())
    collisions = y + kernel[0] - kernel_image.shape[0] // 2, x + kernel[1] - kernel_image.shape[1] // 2
    raw_map = costmap.get_data()
    # Check if the footprint pixel coordinates are valid, this is, if they are not negative and are inside the map
    good = np.logical_and(np.logical_and(collisions[0] >= 0, collisions[0] < raw_map.shape[0]),
                          np.logical_and(collisions[1] >= 0, collisions[1] < raw_map.shape[1]))

    # Just from the footprint coordinates that are good, check if they collide
    # with obstacles inside the map
    return bool(np.any(raw_map[collisions[0][good],
                               collisions[1][good]] == CostMap2D.LETHAL_OBSTACLE))
def is_robot_colliding(robot_pose, footprint, costmap_data, origin, resolution):
    """
    Check costmap for obstacle at (world_x, world_y)
    If orientation is None, obstacle detection will check only the inscribed-radius
    distance collision. This means that, if the robot is not circular,
    there may be an undetected orientation-dependent collision.
    If orientation is given, footprint must also be given, and should be the same
    used by the costmap to inflate costs. Proper collision detection will
    then be done.
    :param robot_pose array(3)[float64]: pose of the robot
    :param footprint array(N, 2)[float64]: robot's footprint polygon
    :param costmap_data array(H, W)[uint8]: costmap obstacle data
    :param origin array(2)[float64]: origin (x, y) of the costmap
    :param resolution float: resolution of the costmap
    :return bool: whether robot collides or not
    """
    # convert to costmap coordinate system:
    map_x, map_y = world_to_pixel(robot_pose[:2], origin, resolution)
    # TODO: remove this because the following is technically wrong: even if robot's origin is outside, it still can collide
    if not in_costmap_bounds(costmap_data, map_x, map_y):
        return False
    # Now check for orientation-dependent collision
    fp = get_pixel_footprint(robot_pose[2], footprint, resolution)
    values = get_blit_values(fp, costmap_data, map_x, map_y)
    return np.any(values == CostMap2D.LETHAL_OBSTACLE)
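
A short calling sketch for is_robot_colliding (not from the original source); the wall position and footprint below are placeholder values.

# Placeholder data for illustration only.
import numpy as np

costmap_data = np.zeros((100, 100), dtype=np.uint8)   # 5 m x 5 m map at 0.05 m/pixel
costmap_data[60:, :] = CostMap2D.LETHAL_OBSTACLE      # wall covering the upper part of the map
footprint = np.array([[-0.4, -0.3], [0.4, -0.3], [0.4, 0.3], [-0.4, 0.3]])

# Robot well below the wall: no collision expected.
print(is_robot_colliding(np.array([1.0, 1.0, 0.0]), footprint, costmap_data,
                         origin=np.array([0., 0.]), resolution=0.05))
# Robot straddling the wall boundary: collision expected.
print(is_robot_colliding(np.array([2.5, 3.0, 0.0]), footprint, costmap_data,
                         origin=np.array([0., 0.]), resolution=0.05))
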
Example #5
    def __init__(self, env):
        """
        Wrap the environment in this wrapper, that will make the observation egocentric
        :param env object: the environment to wrap.
        """
        super(EgocentricCostmap, self).__init__(env)
        # The OpenAI gym interface requires knowing the resolution of the image up front
        self._egomap_x_bounds = np.array([-0.5, 3.])  # aligned with robot's direction
        self._egomap_y_bounds = np.array([-2., 2.])   # orthogonal to robot's direction
        resulting_size = np.array([
            self._egomap_x_bounds[1] - self._egomap_x_bounds[0],
            self._egomap_y_bounds[1] - self._egomap_y_bounds[0]
        ])

        pixel_size = world_to_pixel(resulting_size,
                                    np.zeros((2, )),
                                    resolution=0.03)
        data_shape = (pixel_size[1], pixel_size[0], 1)
        self.observation_space = spaces.Dict(
            OrderedDict((('env',
                          spaces.Box(low=0,
                                     high=255,
                                     shape=data_shape,
                                     dtype=np.uint8)),
                         ('goal',
                          spaces.Box(low=-1.,
                                     high=1.,
                                     shape=(3, 1),
                                     dtype=np.float64)))))
Example #6
    def __init__(self):
        super(ColoredEgoCostmapRandomAisleTurnEnv, self).__init__()
        # TODO: Will need some trickery to do it fully openai gym style
        # The OpenAI gym interface requires knowing the resolution of the image up front
        self._egomap_x_bounds = np.array([-0.5, 3.5])  # aligned with robot's direction
        self._egomap_y_bounds = np.array([-2., 2.])    # orthogonal to robot's direction
        resulting_size = (self._egomap_x_bounds[1] - self._egomap_x_bounds[0],
                          self._egomap_y_bounds[1] - self._egomap_y_bounds[0])

        pixel_size = world_to_pixel(np.asarray(resulting_size,
                                               dtype=np.float64),
                                    np.zeros((2, )),
                                    resolution=0.03)
        data_shape = (pixel_size[1], pixel_size[0], 1)
        self.observation_space = spaces.Dict(
            OrderedDict((('environment',
                          spaces.Box(low=0,
                                     high=255,
                                     shape=data_shape,
                                     dtype=np.uint8)),
                         ('goal',
                          spaces.Box(low=-1.,
                                     high=1.,
                                     shape=(5, 1),
                                     dtype=np.float64)))))
Example #7
def is_robot_colliding_reference(robot_pose, footprint, costmap_data, origin,
                                 resolution):
    """
    Pure python implementation of is_robot_colliding
    Check costmap for obstacle at (world_x, world_y)
    If orientation is None, obstacle detection will check only the inscribed-radius
    distance collision. This means that, if the robot is not circular,
    there may be an undetected orientation-dependent collision.
    If orientation is given, footprint must also be given, and should be the same
    used by the costmap to inflate costs. Proper collision detection will
    then be done.
    """
    assert isinstance(robot_pose, np.ndarray)
    # convert to costmap coordinate system:
    map_x, map_y = world_to_pixel(robot_pose[:2], origin, resolution)
    # TODO: remove this because the following is technically wrong: even if robot's origin is outside, it still can collide
    if not in_costmap_bounds(costmap_data, map_x, map_y):
        return False
    cost = costmap_data[map_y, map_x]
    if cost == CostMap2D.LETHAL_OBSTACLE:
        return True

    # Now check for orientation-dependent collision
    fp = get_pixel_footprint(robot_pose[2], footprint, resolution)
    values = get_blit_values(fp, costmap_data, map_x, map_y)
    return (values == CostMap2D.LETHAL_OBSTACLE).any()
Example #8
    def world_to_pixel(self, world_coords):
        """
        Convert world coordinates to map pixel coordinates.
        Note that this does not check whether the converted coordinate is within the costmap bounds.
        :param world_coords: either a 2-element numpy array [x, y] or an n x 2 array of n points
        :return: same as input, but in pixel coordinates.
        """
        return world_to_pixel(world_coords, self._origin, self._resolution)
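
The module-level world_to_pixel that all of these examples call is not shown on this page. A rough sketch of the conversion it is assumed to perform (subtract the costmap origin, divide by the resolution, round to integer pixel indices) follows; the name world_to_pixel_sketch and the exact rounding behaviour are assumptions, not the library's actual implementation.

# Hypothetical sketch of the assumed conversion -- not the real world_to_pixel.
import numpy as np

def world_to_pixel_sketch(world_coords, origin, resolution):
    """World (x, y) in meters -> integer pixel (x, y); works for a single point or an n x 2 array."""
    world_coords = np.asarray(world_coords, dtype=np.float64)
    return np.round((world_coords - np.asarray(origin)) / resolution).astype(int)

# e.g. a point 1.0 m right and 0.5 m up from a (0, 0) origin at 0.05 m/pixel -> pixel (20, 10)
print(world_to_pixel_sketch([1.0, 0.5], (0., 0.), 0.05))
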
Example #9
def _mark_block_on_static_map(static_map, poly_pt, color):
    """
    Draw block on a static map.
    :param static_map: static map to put block on
    :param poly_pt: polygon vertices in world coordinates
    :param color: color of the block on static map
    """
    vertices = np.array([
        world_to_pixel(np.array(p), static_map.get_origin(),
                       static_map.get_resolution()) for p in poly_pt
    ]).astype(np.int32)
    cv2.fillPoly(static_map.get_data(), [vertices], color)
def _mark_wall_on_static_map(static_map, p0, p1, width, color):
    """
    Draw wall on a static map.

    :param static_map: static map to put wall on
    :param p0: wall from here
    :param p1: wall to here
    :param width: width of the wall
    :param color: color of the wall on static map
    """
    thickness = max(1, int(width / static_map.get_resolution()))
    cv2.line(static_map.get_data(),
             tuple(
                 world_to_pixel(np.array(p0, dtype=np.float64),
                                static_map.get_origin(),
                                static_map.get_resolution())),
             tuple(
                 world_to_pixel(np.array(p1, dtype=np.float64),
                                static_map.get_origin(),
                                static_map.get_resolution())),
             color=color,
             thickness=thickness)
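
An illustrative call of the two drawing helpers above (not from the original source); the map size, block corners, and wall endpoints are made-up values, and it relies on get_data() returning a writable view of the underlying array, as the helpers themselves do.

# Illustrative sketch only -- geometry values are assumptions.
import numpy as np

static_map = CostMap2D(np.zeros((200, 200), dtype=np.uint8), 0.05, np.array([0., 0.]))

# A 1 m x 1 m block with corners given in world coordinates.
_mark_block_on_static_map(static_map, [(2., 2.), (3., 2.), (3., 3.), (2., 3.)],
                          color=CostMap2D.LETHAL_OBSTACLE)
# A 10 cm thick wall from (0, 5) to (8, 5).
_mark_wall_on_static_map(static_map, (0., 5.), (8., 5.), width=0.1,
                         color=CostMap2D.LETHAL_OBSTACLE)
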
def world_to_pixel_drawing_impl(physical_coords, origin, resolution,
                                map_height):
    """
    World to pixel with a flip
    :param physical_coords: either (x, y) or n x 2 array of (x, y), in physical units
    :param origin: origin of the map
    :param resolution: resolution of the map
    :param map_height int: height of the map
    :return: either (x, y) or n x 2 array of (x, y) in pixel units
    """
    pixel_coords = world_to_pixel(physical_coords, origin, resolution)
    # flip the y because we flip image for display
    pixel_coords[..., 1] = map_height - 1 - pixel_coords[..., 1]
    return pixel_coords
Example #12
def create_empty(world_size,
                 resolution,
                 world_origin=(0., 0.),
                 dtype=np.uint8):
    """
    Create an empty costmap with a given size and resolution
    :param world_size: (x, y) size of world in meters.
    :param resolution: Resolution in meters/pixel
    :param world_origin: (x, y) location of the origin relative to the bottom left corner.
    :param dtype: Data type for costmap
    :return: A CostMap2D object
    """
    pixel_size = world_to_pixel(np.asarray(world_size, dtype=np.float64),
                                np.array((0., 0.)), resolution)
    data = np.zeros(pixel_size[::-1], dtype=dtype)
    return CostMap2D(data, resolution,
                     np.asarray(world_origin, dtype=np.float64))
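
A quick usage sketch for create_empty (not from the original source); the exact output shape depends on the rounding done inside world_to_pixel.

# A 5 m x 5 m empty costmap at 5 cm per pixel.
costmap = create_empty((5., 5.), resolution=0.05)
print(costmap.get_data().shape)   # (100, 100), up to rounding inside world_to_pixel
print(costmap.get_resolution())   # 0.05
print(costmap.get_origin())       # [0. 0.]
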
Example #13
    def _override_primitive_kernels(self, motion_primitives, footprint,
                                    use_full_kernels):
        resolution = motion_primitives.get_resolution()
        print('Setting up motion primitive kernels..')
        for p in motion_primitives.get_primitives():

            primitive_start = pixel_to_world(np.zeros((2, )), np.zeros((2, )),
                                             resolution)
            primitive_states = p.get_intermediate_states().copy()
            primitive_states[:, :2] += primitive_start

            full_cv_kernel_x = []
            full_cv_kernel_y = []
            for pose in primitive_states:
                kernel = get_pixel_footprint(pose[2], footprint, resolution)

                kernel_center = (kernel.shape[1] // 2, kernel.shape[0] // 2)
                kernel = np.where(kernel)

                px, py = world_to_pixel(pose[:2], np.zeros((2, )), resolution)
                full_cv_kernel_x.append(kernel[1] + (px - kernel_center[0]))
                full_cv_kernel_y.append(kernel[0] + (py - kernel_center[1]))

            full_cv_kernel_x = np.hstack(full_cv_kernel_x)
            full_cv_kernel_y = np.hstack(full_cv_kernel_y)
            full_cv_kernel = np.column_stack(
                (full_cv_kernel_x, full_cv_kernel_y)).astype(np.int32)

            row_view = np.ascontiguousarray(full_cv_kernel).view(
                np.dtype(
                    (np.void,
                     full_cv_kernel.dtype.itemsize * full_cv_kernel.shape[1])))
            _, idx = np.unique(row_view, return_index=True)
            full_cv_kernel = np.ascontiguousarray(full_cv_kernel[idx])

            if use_full_kernels:
                self.set_primitive_collision_pixels(p.starttheta_c,
                                                    p.motprimID,
                                                    full_cv_kernel)
            else:
                min_x, max_x = np.amin(full_cv_kernel[:, 0]), np.amax(
                    full_cv_kernel[:, 0])
                min_y, max_y = np.amin(full_cv_kernel[:, 1]), np.amax(
                    full_cv_kernel[:, 1])

                temp_img = np.zeros((max_y - min_y + 1, max_x - min_x + 1),
                                    dtype=np.uint8)
                temp_img[full_cv_kernel[:, 1] - min_y,
                         full_cv_kernel[:, 0] - min_x] = 255
                _, contours, _ = cv2.findContours(temp_img.copy(),
                                                  cv2.RETR_EXTERNAL,
                                                  cv2.CHAIN_APPROX_NONE)
                contour = contours[0].reshape(-1, 2)

                perimeter_kernel = np.column_stack(
                    (contour[:, 0] + min_x,
                     contour[:, 1] + min_y)).astype(np.int32)

                self.set_primitive_collision_pixels(p.starttheta_c,
                                                    p.motprimID,
                                                    perimeter_kernel)

        print('Done.')
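
The np.void row-view in _override_primitive_kernels is a common trick for removing duplicate (x, y) rows that predates np.unique gaining an axis argument. A sketch of the more direct modern equivalent (NumPy 1.13+) is shown below; the row ordering of the result may differ from the trick above, but the set of unique pixels is the same.

# Sketch of the row-deduplication step using np.unique with axis=0 (NumPy >= 1.13).
import numpy as np

full_cv_kernel = np.array([[1, 2], [3, 4], [1, 2]], dtype=np.int32)
unique_kernel = np.unique(full_cv_kernel, axis=0)
print(unique_kernel)   # [[1 2]
                       #  [3 4]]
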
Example #14
def forward_model_tricycle_motion_primitives(resolution,
                                             number_of_angles,
                                             target_v,
                                             tricycle_angle_samples,
                                             primitives_duration,
                                             front_wheel_rotation_speedup,
                                             v_samples,
                                             refine_dt=0.1):

    max_front_wheel_angle = IndustrialTricycleV1Dimensions.max_front_wheel_angle()
    front_wheel_from_axis = IndustrialTricycleV1Dimensions.front_wheel_from_axis()
    max_front_wheel_speed = IndustrialTricycleV1Dimensions.max_front_wheel_speed()
    front_column_model_p_gain = IndustrialTricycleV1Dimensions.front_column_model_p_gain()

    def forward_model(pose, initial_states, dt, control_signals):
        current_wheel_angles = initial_states[:, 0]
        next_poses, next_angles = tricycle_kinematic_step(
            pose,
            current_wheel_angles,
            dt,
            control_signals,
            max_front_wheel_angle,
            front_wheel_from_axis,
            max_front_wheel_speed,
            front_column_model_p_gain,
            model_front_column_pid=False)
        next_state = initial_states.copy()
        next_state[:, 0] = next_angles[:]
        next_state[:, 1] = control_signals[:, 0]
        next_state[:, 2] = 0.  # angular velocity is ignored
        return next_poses, next_state

    pose_evolution, state_evolution, control_evolution, refine_dt = control_choices_tricycle_exhaustive(
        forward_model,
        wheel_angle=0.,
        max_v=target_v,
        exhausitve_dt=refine_dt * front_wheel_rotation_speedup,
        refine_dt=refine_dt,
        n_steps=1,
        theta_samples=tricycle_angle_samples,
        v_samples=v_samples,
        extra_copy_n_steps=primitives_duration - 1,
        max_front_wheel_angle=max_front_wheel_angle,
        max_front_wheel_speed=max_front_wheel_speed)

    primitives = []
    for start_theta_discrete in range(number_of_angles):
        current_primitive_cells = []
        for primitive_id, (ego_poses, controls) in enumerate(
                zip(pose_evolution, control_evolution)):
            ego_poses = np.vstack(([[0., 0., 0.]], ego_poses))
            start_angle = angle_discrete_to_cont(start_theta_discrete,
                                                 number_of_angles)
            poses = from_egocentric_to_global(
                ego_poses,
                ego_pose_in_global_coordinates=np.array([0., 0., start_angle]))

            # to model precision loss while converting to .mprim file, we round it here
            poses = np.around(poses, decimals=4)
            last_pose = poses[-1]
            end_cell = np.zeros((3, ), dtype=int)

            center_cell_shift = pixel_to_world(np.zeros((2, )), np.zeros(
                (2, )), resolution)
            end_cell[:2] = world_to_pixel(center_cell_shift + last_pose[:2],
                                          np.zeros((2, )), resolution)
            perfect_last_pose = np.zeros((3, ), dtype=float)
            end_cell[2] = angle_cont_to_discrete(last_pose[2],
                                                 number_of_angles)

            current_primitive_cells.append(tuple(end_cell))

            perfect_last_pose[:2] = pixel_to_world(end_cell[:2], np.zeros(
                (2, )), resolution)
            perfect_last_pose[2] = angle_discrete_to_cont(
                end_cell[2], number_of_angles)

            action_cost_multiplier = 1

            primitive = MotionPrimitive(
                primitive_id=primitive_id,
                start_theta_discrete=start_theta_discrete,
                action_cost_multiplier=action_cost_multiplier,
                end_cell=end_cell,
                intermediate_states=poses,
                control_signals=controls)
            primitives.append(primitive)

    return MotionPrimitives(resolution=resolution,
                            number_of_angles=number_of_angles,
                            mprim_list=primitives)
Example #15
def forward_model_diffdrive_motion_primitives(resolution,
                                              number_of_angles,
                                              target_v,
                                              target_w,
                                              w_samples_in_each_direction,
                                              primitives_duration,
                                              refine_dt=0.05):
    def forward_model(pose, state, dt, control_signals):
        new_pose = kinematic_body_pose_motion_step(
            pose=pose,
            linear_velocity=control_signals[:, 0],
            angular_velocity=control_signals[:, 1],
            dt=dt)
        next_state = state.copy()
        next_state[:, 0] = control_signals[:, 0]
        next_state[:, 1] = control_signals[:, 1]
        return new_pose, next_state

    pose_evolution, state_evolution, control_evolution, refine_dt, control_costs = control_choices_diff_drive_exhaustive(
        max_v=target_v,
        max_w=target_w,
        forward_model=forward_model,
        initial_state=(0., 0.),
        refine_dt=refine_dt,
        exhausitve_dt=refine_dt * primitives_duration,
        n_steps=1,
        w_samples_in_each_direction=w_samples_in_each_direction,
        enable_turn_in_place=True,
        v_samples=1)

    primitives = []
    for start_theta_discrete in range(number_of_angles):
        current_primitive_cells = []
        for primitive_id, (ego_poses, controls) in enumerate(
                zip(pose_evolution, control_evolution)):
            ego_poses = np.vstack(([[0., 0., 0.]], ego_poses))
            start_angle = angle_discrete_to_cont(start_theta_discrete,
                                                 number_of_angles)
            poses = from_egocentric_to_global(
                ego_poses,
                ego_pose_in_global_coordinates=np.array([0., 0., start_angle]))

            # to model precision loss while converting to .mprim file, we round it here
            poses = np.around(poses, decimals=4)
            last_pose = poses[-1]
            end_cell = np.zeros((3, ), dtype=int)

            center_cell_shift = pixel_to_world(np.zeros((2, )), np.zeros(
                (2, )), resolution)
            end_cell[:2] = world_to_pixel(center_cell_shift + last_pose[:2],
                                          np.zeros((2, )), resolution)
            perfect_last_pose = np.zeros((3, ), dtype=float)
            end_cell[2] = angle_cont_to_discrete(last_pose[2],
                                                 number_of_angles)

            current_primitive_cells.append(tuple(end_cell))

            perfect_last_pose[:2] = pixel_to_world(end_cell[:2], np.zeros(
                (2, )), resolution)
            perfect_last_pose[2] = angle_discrete_to_cont(
                end_cell[2], number_of_angles)

            # penalize slow forward movement and sudden turns
            if controls[0, 0] < target_v * 0.5 or abs(
                    controls[0, 1]) > 0.5 * target_w:
                action_cost_multiplier = 100
            else:
                action_cost_multiplier = 1

            primitive = MotionPrimitive(
                primitive_id=primitive_id,
                start_theta_discrete=start_theta_discrete,
                action_cost_multiplier=action_cost_multiplier,
                end_cell=end_cell,
                intermediate_states=poses,
                control_signals=controls)
            primitives.append(primitive)

    return MotionPrimitives(resolution=resolution,
                            number_of_angles=number_of_angles,
                            mprim_list=primitives)
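
A hedged example of invoking the generator above (not from the original source); all parameter values are placeholders chosen only to show the call shape, and the get_primitives accessor is assumed to match the MotionPrimitives usage earlier on this page.

# Placeholder parameters for illustration only.
primitives = forward_model_diffdrive_motion_primitives(
    resolution=0.05,               # meters per pixel
    number_of_angles=16,           # number of discrete headings
    target_v=0.5,                  # forward velocity in m/s
    target_w=1.0,                  # angular velocity in rad/s
    w_samples_in_each_direction=2,
    primitives_duration=10)
print(len(primitives.get_primitives()))
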