Example #1
0
    def __init__(self):
        # Configure the volume of interest (VOI) used for obstacle
        # detection while driving forward. Stretch's mast shakes while
        # moving forward, which can result in false positives if the
        # sensitivity is too high.

        # Look-ahead depth of the VOI.
        forward_extent_m = 0.15
        # Robot's width plus a safety margin.
        lateral_extent_m = 0.4
        # Vertical extent of the VOI.
        vertical_extent_m = 0.07  #0.06
        # Offset of the VOI's near face from base_link (front of robot).
        front_offset_m = 0.08

        origin = np.array([front_offset_m, -lateral_extent_m / 2.0, -0.03])
        self.voi = rm.ROSVolumeOfInterest('base_link', origin,
                                          np.identity(3),
                                          forward_extent_m,
                                          lateral_extent_m,
                                          vertical_extent_m)

        # Minimum number of occupied pixels before reporting an obstacle.
        self.obstacle_pixel_thresh = 100

        # Max-height image over the VOI: 6 mm per pixel, 8-bit heights.
        self.max_height_im = rm.ROSMaxHeightImage(self.voi, 0.006, np.uint8)
        self.max_height_im.print_info()
Example #2
0
    def __init__(self, debug_directory=None):
        # Optional directory for saving planner debugging output.
        if debug_directory is None:
            self.debug_directory = None
        else:
            self.debug_directory = debug_directory + 'fast_single_view_planner/'
            print('MoveBase __init__: self.debug_directory =', self.debug_directory)

        # Define the volume of interest (VOI) for planning using the
        # current view.

        # Distance to look ahead of the robot.
        ahead_m = 2.0
        # Robot's half-width plus a safety margin, per side.
        side_m = 1.3

        # Vertical extent: from slightly below the nominal ground plane
        # up to the robot's head height.
        head_height_m = 1.13
        below_ground_m = 0.03
        voi_height_m = head_height_m + below_ground_m

        # The VOI's near face sits slightly behind the robot's front.
        front_x_m = -0.1
        voi_side_x_m = abs(front_x_m) + ahead_m
        voi_side_y_m = 2.0 * side_m

        self.frame_id = 'base_link'
        voi_origin = np.array([front_x_m, -voi_side_y_m / 2.0, -below_ground_m])
        self.voi = rm.ROSVolumeOfInterest(self.frame_id, voi_origin,
                                          np.identity(3), voi_side_x_m,
                                          voi_side_y_m, voi_height_m)
        # Max-height image over the VOI: 6 mm per pixel, 8-bit heights.
        self.max_height_im = rm.ROSMaxHeightImage(self.voi, 0.006, np.uint8)
        self.max_height_im.print_info()
        self.updated = False
Example #3
0
    def __init__(self, max_height_im=None, voi_side_m=8.0, voi_origin_m=None):
        """Create or wrap a max-height image covering a square map region.

        max_height_im: existing rm.ROSMaxHeightImage to reuse. When None,
            a new one is created covering a voi_side_m x voi_side_m region
            in the 'map' frame.
        voi_side_m: side length (meters) of the square volume of interest.
        voi_origin_m: origin of the volume of interest in the 'map' frame.
            When None, the region is centered on the map origin.
        """
        if max_height_im is not None:
            self.max_height_im = max_height_im
        else:
            # How to best set this volume of interest (VOI) merits further
            # consideration. Note that representing a smaller range of heights
            # results in higher height resolution when using np.uint8
            # pixels. For this VOI, 0.0 should be the nominal ground height
            # achieved via calibration.

            # Set to approximately the height of the D435i. This should result
            # in the volume of interest (VOI) containing the highest
            # manipulable surfaces. Also, when the top of the viewing frustum
            # is parallel to the ground it will be at or close to the top of
            # the volume of interest.
            robot_head_above_ground = 1.13

            # How far below the expected floor height the volume of interest
            # should extend is less clear. A standard stair step should be
            # less than 20cm tall, but the robot should not go into such
            # areas. The VOI only needs to contain points the robot can
            # reach over or drive over, so unobserved points on the floor
            # must be treated with caution — they might be significantly
            # below the floor and untraversable. The margin below ground
            # absorbs sensor noise, floor-type variation, small calibration
            # errors, and small traversable depressions. The risk of
            # too-low points being classified as traversable floor is
            # mitigated by separate point-cloud-based obstacle detection
            # while moving and by the cliff sensors.
            lowest_distance_below_ground = 0.05  # 5 cm

            total_height = robot_head_above_ground + lowest_distance_below_ground

            voi_axes = np.identity(3)
            if voi_origin_m is None:
                # Center the square region on the map origin.
                voi_origin = np.array([-voi_side_m / 2.0,
                                       -voi_side_m / 2.0,
                                       -lowest_distance_below_ground])
            else:
                # Bug fix: a caller-supplied origin was previously ignored,
                # leaving voi_origin undefined and raising NameError.
                voi_origin = voi_origin_m
            voi = rm.ROSVolumeOfInterest('map', voi_origin, voi_axes,
                                         voi_side_m, voi_side_m, total_height)

            m_per_pix = 0.006
            pixel_dtype = np.uint8

            self.max_height_im = rm.ROSMaxHeightImage(voi, m_per_pix, pixel_dtype,
                                                      use_camera_depth_image=True)
            self.max_height_im.create_blank_rgb_image()

        self.max_height_im.print_info()