Example #1
def pose_collides(x, y, angle, robot, costmap):
    """
    Check if robot footprint at x, y (world coordinates) and
        oriented as yaw collides with lethal obstacles.
    :param x: robot pose
    :param y: robot pose
    :param angle: robot pose
    :param robot: Robot that will supply the footprint
    :param costmap Costmap2D: costmap containing the obstacles to collide with
    :return bool : does the pose collide?
    """
    kernel_image = get_pixel_footprint(angle, robot.get_footprint(), costmap.get_resolution())
    # Get the pixel coordinates of the footprint cells inside kernel_image
    kernel = np.where(kernel_image)
    # Move footprint to (x,y), all in pixel coordinates
    x, y = world_to_pixel(np.array([x, y]), costmap.get_origin(), costmap.get_resolution())
    collisions = y + kernel[0] - kernel_image.shape[0] // 2, x + kernel[1] - kernel_image.shape[1] // 2
    raw_map = costmap.get_data()
    # Check that the footprint pixel coordinates are valid, i.e. non-negative and inside the map
    good = np.logical_and(np.logical_and(collisions[0] >= 0, collisions[0] < raw_map.shape[0]),
                          np.logical_and(collisions[1] >= 0, collisions[1] < raw_map.shape[1]))

    # Among the valid footprint coordinates, check whether any of them
    # collides with an obstacle inside the map
    return bool(np.any(raw_map[collisions[0][good],
                               collisions[1][good]] == CostMap2D.LETHAL_OBSTACLE))
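
A minimal usage sketch for pose_collides (hedged: the CostMap2D constructor signature and the stub robot's footprint format are assumptions, not part of the source):

import numpy as np

class StubRobot(object):
    # Hypothetical stub: a 0.6 m x 0.4 m rectangle as an (N, 2) array of
    # vertices in meters (assumed footprint format).
    def get_footprint(self):
        return np.array([[0.3, 0.2], [0.3, -0.2], [-0.3, -0.2], [-0.3, 0.2]])

# A 5 m x 5 m map at 5 cm resolution with one square lethal obstacle;
# the CostMap2D(data, resolution, origin) signature is assumed here.
data = np.zeros((100, 100), dtype=np.uint8)
data[40:60, 40:60] = CostMap2D.LETHAL_OBSTACLE
costmap = CostMap2D(data, resolution=0.05, origin=np.zeros((2,)))

print(pose_collides(2.5, 2.5, 0.0, StubRobot(), costmap))  # True: inside the obstacle
print(pose_collides(0.5, 0.5, 0.0, StubRobot(), costmap))  # False: footprint clears it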
Example #2
def is_robot_colliding_reference(robot_pose, footprint, costmap_data, origin,
                                 resolution):
    """
    Pure python implementation of is_robot_colliding
    Check costmap for obstacle at (world_x, world_y)
    If orientation is None, obstacle detection will check only the inscribed-radius
    distance collision. This means that, if the robot is not circular,
    there may be an undetected orientation-dependent collision.
    If orientation is given, footprint must also be given, and should be the same
    used by the costmap to inflate costs. Proper collision detection will
    then be done.
    """
    assert isinstance(robot_pose, np.ndarray)
    # convert to costmap coordinate system:
    map_x, map_y = world_to_pixel(robot_pose[:2], origin, resolution)
    # TODO: remove this check, it is technically wrong: even if the robot's origin is outside the map, the robot can still collide
    if not in_costmap_bounds(costmap_data, map_x, map_y):
        return False
    cost = costmap_data[map_y, map_x]
    if cost == CostMap2D.LETHAL_OBSTACLE:
        return True

    # Now check for orientation-dependent collision
    fp = get_pixel_footprint(robot_pose[2], footprint, resolution)
    values = get_blit_values(fp, costmap_data, map_x, map_y)
    return (values == CostMap2D.LETHAL_OBSTACLE).any()
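
A hedged usage sketch for the reference implementation; the (N, 2) vertex footprint format and the numeric values are illustrative assumptions:

import numpy as np

data = np.zeros((100, 100), dtype=np.uint8)
data[40:60, 40:60] = CostMap2D.LETHAL_OBSTACLE
footprint = np.array([[0.3, 0.2], [0.3, -0.2], [-0.3, -0.2], [-0.3, 0.2]])
pose = np.array([2.5, 2.5, np.pi / 4])  # x, y in meters, yaw in radians
print(is_robot_colliding_reference(pose, footprint, data,
                                   origin=np.zeros((2,)), resolution=0.05))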
Example #3
def test_get_pixel_footprint_consistency():
    footprints = [_rectangular_footprint(), _tricycle_footprint()]
    for fill in [True, False]:
        for resolution in [0.03, 0.05]:
            for f in footprints:
                angles = np.random.random_sample(1000) * np.pi * 2 - np.pi
                for angle in angles:
                    pixel_footprint = get_pixel_footprint(angle,
                                                          f,
                                                          resolution,
                                                          fill=fill)
                    reference = get_pixel_footprint_py(angle,
                                                       f,
                                                       resolution,
                                                       fill=fill)
                    np.testing.assert_array_equal(pixel_footprint, reference)
def get_pixel_footprint_for_drawing(angle,
                                    robot_footprint,
                                    map_resolution,
                                    fill=True):
    """
    Return pixel footprint kernel for visualization of the robot.
    The footprint kernel is flipped.
    angle_range - angle in physical coordinates (!)

    :param angle: the angle
    :param robot_footprint:  robots foot print
    :param map_resolution: resolution of the map
    :param fill:  shuold we fill
    :return: picture of the footprint
    """
    footprint_picture = get_pixel_footprint(angle, robot_footprint,
                                            map_resolution, fill)
    footprint_picture = np.flipud(footprint_picture)
    return footprint_picture
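
A short visualization sketch (the footprint values are hypothetical; matplotlib is used here purely for display):

import numpy as np
import matplotlib.pyplot as plt

footprint = np.array([[0.3, 0.2], [0.3, -0.2], [-0.3, -0.2], [-0.3, 0.2]])
kernel = get_pixel_footprint_for_drawing(np.pi / 6, footprint, map_resolution=0.05)
plt.imshow(kernel, cmap='gray')
plt.title('Footprint kernel at 30 degrees')
plt.show()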
Example #5
    def _override_primitive_kernels(self, motion_primitives, footprint,
                                    use_full_kernels):
        resolution = motion_primitives.get_resolution()
        print('Setting up motion primitive kernels...')
        for p in motion_primitives.get_primitives():

            primitive_start = pixel_to_world(np.zeros((2, )), np.zeros((2, )),
                                             resolution)
            primitive_states = p.get_intermediate_states().copy()
            primitive_states[:, :2] += primitive_start

            full_cv_kernel_x = []
            full_cv_kernel_y = []
            for pose in primitive_states:
                kernel = get_pixel_footprint(pose[2], footprint, resolution)

                kernel_center = (kernel.shape[1] // 2, kernel.shape[0] // 2)
                kernel = np.where(kernel)

                px, py = world_to_pixel(pose[:2], np.zeros((2, )), resolution)
                full_cv_kernel_x.append(kernel[1] + (px - kernel_center[0]))
                full_cv_kernel_y.append(kernel[0] + (py - kernel_center[1]))

            full_cv_kernel_x = np.hstack(full_cv_kernel_x)
            full_cv_kernel_y = np.hstack(full_cv_kernel_y)
            full_cv_kernel = np.column_stack(
                (full_cv_kernel_x, full_cv_kernel_y)).astype(np.int32)

            # Deduplicate repeated (x, y) pixels: view each two-column int32
            # row as a single opaque void scalar so np.unique compares rows.
            row_view = np.ascontiguousarray(full_cv_kernel).view(
                np.dtype(
                    (np.void,
                     full_cv_kernel.dtype.itemsize * full_cv_kernel.shape[1])))
            _, idx = np.unique(row_view, return_index=True)
            full_cv_kernel = np.ascontiguousarray(full_cv_kernel[idx])

            if use_full_kernels:
                self.set_primitive_collision_pixels(p.starttheta_c,
                                                    p.motprimID,
                                                    full_cv_kernel)
            else:
                min_x, max_x = np.amin(full_cv_kernel[:, 0]), np.amax(
                    full_cv_kernel[:, 0])
                min_y, max_y = np.amin(full_cv_kernel[:, 1]), np.amax(
                    full_cv_kernel[:, 1])

                # Rasterize the kernel pixels into a small binary image and
                # trace its outer contour to keep only the perimeter pixels.
                temp_img = np.zeros((max_y - min_y + 1, max_x - min_x + 1),
                                    dtype=np.uint8)
                temp_img[full_cv_kernel[:, 1] - min_y,
                         full_cv_kernel[:, 0] - min_x] = 255
                # cv2.findContours returns (image, contours, hierarchy) in
                # OpenCV 3 and (contours, hierarchy) in OpenCV 4; taking the
                # last two return values works under both.
                contours, _ = cv2.findContours(temp_img.copy(),
                                               cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)[-2:]
                contour = contours[0].reshape(-1, 2)

                perimeter_kernel = np.column_stack(
                    (contour[:, 0] + min_x,
                     contour[:, 1] + min_y)).astype(np.int32)

                self.set_primitive_collision_pixels(p.starttheta_c,
                                                    p.motprimID,
                                                    perimeter_kernel)

        print('Done.')
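
On NumPy 1.13+ the void-view deduplication above can be written as a row-wise np.unique; the returned row order may differ from the void-view trick, which does not matter for a set of collision pixels. A small sketch:

import numpy as np

pts = np.array([[1, 2], [3, 4], [1, 2]], dtype=np.int32)
print(np.unique(pts, axis=0))  # [[1 2], [3 4]]: duplicate rows removed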