Example #1
    def extract_predicate_values(self, data_point_count: int,
                                 goal_positions: np.ndarray,
                                 goal_orientations: np.ndarray,
                                 handle_bb_mins: np.ndarray,
                                 handle_bb_maxs: np.ndarray) -> np.ndarray:
        '''Returns a 2D numpy array of shape
        (data_point_count, number of grasping predicates), where each row
        contains the predicate values extracted for one data point.
        '''
        predicate_values = np.zeros(
            (data_point_count,
             MetaPredicateData.get_predicate_count(GraspingPredicateLibrary)),
            dtype=int)
        predicates = MetaPredicateData.get_predicates(GraspingPredicateLibrary)
        for i in range(data_point_count):
            gripper_position = Vector3(x=goal_positions[i, 0],
                                       y=goal_positions[i, 1],
                                       z=goal_positions[i, 2])
            gripper_orientation = Vector3(x=goal_orientations[i, 0],
                                          y=goal_orientations[i, 1],
                                          z=goal_orientations[i, 2])
            gripper_pose = Pose3(position=gripper_position,
                                 orientation=gripper_orientation)

            bb_min = Vector3(x=handle_bb_mins[i, 0],
                             y=handle_bb_mins[i, 1],
                             z=handle_bb_mins[i, 2])
            bb_max = Vector3(x=handle_bb_maxs[i, 0],
                             y=handle_bb_maxs[i, 1],
                             z=handle_bb_maxs[i, 2])
            handle_bbox = BBox3(min_values=bb_min, max_values=bb_max)

            for j, p in enumerate(predicates):
                predicate_values[i, j] = p(gripper_pose, handle_bbox)
        return predicate_values
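A minimal usage sketch, assuming an instance of the (not shown) class that owns this method, here called `extractor`, and arrays in the (N, 3) layout the method indexes into; all values are synthetic:

    import numpy as np

    n = 5
    values = extractor.extract_predicate_values(
        data_point_count=n,
        goal_positions=np.random.uniform(-1., 1., (n, 3)),
        goal_orientations=np.random.uniform(-np.pi, np.pi, (n, 3)),
        handle_bb_mins=np.zeros((n, 3)),
        handle_bb_maxs=np.ones((n, 3)))
    # one row per data point, one column per predicate
    # in GraspingPredicateLibrary
    print(values.shape)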
Example #2
    def extract_predicate_values(self, data_point_count: int,
                                 handle_positions: np.ndarray,
                                 goal_positions: np.ndarray,
                                 motion_durations: np.ndarray) -> np.ndarray:
        '''Returns a 2D numpy array in which each row contains the pulling
        predicate values for one data point, with an additional final column
        encoding the qualitative duration of the motion.
        '''
        predicate_values = np.zeros(
            (data_point_count,
             MetaPredicateData.get_predicate_count(PullingPredicateLibrary) +
             1),
            dtype=int)
        predicates = MetaPredicateData.get_predicates(PullingPredicateLibrary)
        for i in range(data_point_count):
            handle_position = Vector3(x=handle_positions[i, 0],
                                      y=handle_positions[i, 1],
                                      z=handle_positions[i, 2])
            handle_orientation = Vector3(x=0., y=0., z=0.)
            handle_pose = Pose3(position=handle_position,
                                orientation=handle_orientation)

            goal_position = Vector3(x=goal_positions[i, 0],
                                    y=goal_positions[i, 1],
                                    z=goal_positions[i, 2])
            goal_orientation = Vector3(x=0., y=0., z=0.)
            goal_pose = Pose3(position=goal_position,
                              orientation=goal_orientation)

            for j, p in enumerate(predicates):
                predicate_values[i, j] = p(handle_pose, goal_pose)

            predicate_values[i, -1] = PullingPredicateLibrary.qualitative_time(
                motion_durations[i])
        return predicate_values
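The extra column appended to each row stores a qualitative duration label. The implementation of `PullingPredicateLibrary.qualitative_time` is not shown here; a minimal sketch of such a discretiser, with purely illustrative thresholds, might look as follows:

    def qualitative_time(duration: float) -> int:
        '''Maps a continuous motion duration (in seconds) to a discrete
        label: 0 = short, 1 = medium, 2 = long. The thresholds are
        invented for illustration.
        '''
        if duration < 2.:
            return 0
        if duration < 5.:
            return 1
        return 2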
Example #3
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        A dictionary with two entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a 'numpy.ndarray' with one success probability per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we generate samples in the free space on the surface, i.e.
        # we ignore samples that cause collisions with the other objects
        collected_samples = 0
        distances = np.zeros(number_of_samples)
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min, self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            # we check whether the object would collide with any of the
            # other objects in the newly generated pose
            manipulated_object_polygon = obj_copy.get_z_projection()
            collision = False
            for obj in object_polygons:
                if obj.intersects(manipulated_object_polygon):
                    collision = True
                    break

            # we take the generated pose as a valid candidate if the
            # object doesn't collide with any of the objects on the surface
            if not collision:
                position = Vector3(position.x,
                                   position.y,
                                   self.surface.pose.position.z)
                orientation = Vector3(obj_copy.pose.orientation.x,
                                      obj_copy.pose.orientation.y,
                                      z_orientation)
                pose = Pose3(self.manipulated_object.pose.frame_id, position,
                             orientation)
                candidate_poses.append(pose)

                distances[collected_samples] = position.distance(self.robot_pose.position)
                collected_samples += 1

        # we value closer poses higher than farther ones;
        # the results are treated as success probabilities,
        # though the values actually encode preference
        success_probabilities = np.max(distances) / distances
        success_probabilities /= np.sum(success_probabilities)

        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
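A worked example of the weighting above: for distances [1, 2, 4], `np.max(distances) / distances` yields [4, 2, 1], so the closest pose receives the largest weight, and the subsequent normalisation makes the weights sum to one:

    import numpy as np

    distances = np.array([1., 2., 4.])
    weights = np.max(distances) / distances    # [4., 2., 1.]
    weights /= np.sum(weights)                 # [4/7, 2/7, 1/7]
    print(weights)                             # approx. [0.571 0.286 0.143]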
Example #4
    def __init__(self, frame_id='', position=Vector3(), orientation=Vector3()):
        '''Keyword arguments:

        frame_id -- name of the reference frame in which the pose is expressed
        position -- an 'action_execution.geometry.vector.Vector3' object
        orientation -- an 'action_execution.geometry.vector.Vector3' object

        '''
        self.frame_id = frame_id
        self.position = deepcopy(position)
        self.orientation = deepcopy(orientation)
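Construction is then straightforward; the frame name and values below are illustrative:

    pose = Pose3(frame_id='odom',
                 position=Vector3(0.5, 0., 0.75),
                 orientation=Vector3(0., 0., 1.57))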
Example #5
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        A dictionary with two entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a 'numpy.ndarray' with one success probability per pose

        '''
        candidate_poses = list()

        # we generate samples over the target object
        collected_samples = 0
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random 2D position over the target object
            position = Vector2.random_vector(self.target_object.bbox.min,
                                             self.target_object.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            position = Vector3(position.x,
                               position.y,
                               self.target_object.bbox.max.z)
            orientation = Vector3(obj_copy.pose.orientation.x,
                                  obj_copy.pose.orientation.y,
                                  z_orientation)
            pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
            candidate_poses.append(pose)
            collected_samples += 1

        success_probabilities = np.ones(number_of_samples) / number_of_samples
        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
Example #6
 def get_coordinates(self):
     '''Returns a list of 'action_execution.geometry.vector.Vector3' objects
     that represent the coordinates of the bounding box.
     '''
     point1 = Vector3(self.min.x, self.min.y, self.min.z)
     point2 = Vector3(self.min.x, self.min.y, self.max.z)
     point3 = Vector3(self.min.x, self.max.y, self.min.z)
     point4 = Vector3(self.min.x, self.max.y, self.max.z)
     point5 = Vector3(self.max.x, self.min.y, self.min.z)
     point6 = Vector3(self.max.x, self.min.y, self.max.z)
     point7 = Vector3(self.max.x, self.max.y, self.min.z)
     point8 = Vector3(self.max.x, self.max.y, self.max.z)
     points = [
         point1, point2, point3, point4, point5, point6, point7, point8
     ]
     return points
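Assuming the same `min`/`max` attribute layout, an equivalent and more compact implementation could enumerate the corners with `itertools.product`, which reproduces the order above (x varying slowest, z fastest):

    from itertools import product

    def get_coordinates(self):
        return [Vector3(x, y, z)
                for x, y, z in product((self.min.x, self.max.x),
                                       (self.min.y, self.max.y),
                                       (self.min.z, self.max.z))]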
Example #7
 def from_dict(obj_dict):
     obj = Pose3()
     obj.frame_id = obj_dict['frame_id']
     obj.position = Vector3.from_dict(obj_dict['position'])
     obj.orientation = Vector3.from_dict(obj_dict['orientation'])
     return obj
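Usage could look like the sketch below; the nested x/y/z keys are an assumption about what `Vector3.from_dict` expects, since its implementation is not shown here:

    pose = Pose3.from_dict({'frame_id': 'odom',
                            'position': {'x': 0.5, 'y': 0., 'z': 0.75},
                            'orientation': {'x': 0., 'y': 0., 'z': 1.57}})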
Example #8
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        A dictionary with two entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a 'numpy.ndarray' with one success probability per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we generate samples in the free space on the surface, i.e.
        # we ignore samples that cause collisions with the other objects
        collected_samples = 0
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min,
                                             self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2. * np.pi)
            obj_copy.rotate_around_z(z_orientation)

            # we check if the object would collide with any of the other
            # objects in the newly generated pose
            manipulated_object_polygon = obj_copy.get_z_projection()
            collision = False
            for obj in object_polygons:
                if obj.intersects(manipulated_object_polygon):
                    collision = True
                    break

            # we take the generated pose as a valid candidate if the
            # object doesn't collide with any of the objects on the surface
            if not collision:
                position = Vector3(position.x, position.y,
                                   self.surface.pose.position.z)
                orientation = Vector3(obj_copy.pose.orientation.x,
                                      obj_copy.pose.orientation.y,
                                      z_orientation)
                pose = Pose3(self.manipulated_object.pose.frame_id, position,
                             orientation)
                candidate_poses.append(pose)
                collected_samples += 1

        success_probabilities = np.ones(number_of_samples) / number_of_samples
        return {
            'candidate_poses': candidate_poses,
            'success_probabilities': success_probabilities
        }
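One caveat of this rejection-sampling loop is that it never terminates if the surface has no free space left. A generic guard, sketched below with an invented `max_attempts` budget (not part of action-execution), keeps such loops bounded:

    def sample_with_budget(sample_fn, is_valid_fn, number_of_samples,
                           max_attempts):
        '''Draws candidates from sample_fn until number_of_samples valid
        ones are collected or the attempt budget is exhausted.
        '''
        samples = []
        attempts = 0
        while len(samples) < number_of_samples and attempts < max_attempts:
            attempts += 1
            candidate = sample_fn()
            if is_valid_fn(candidate):
                samples.append(candidate)
        return samples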
Example #9
    cmap_factor = 100.
    colormap = plt.cm.RdYlGn(probs * cmap_factor)[:, 0:3]
    positions = np.zeros((len(poses), 2))
    for i, pose in enumerate(poses):
        positions[i] = np.array([pose.position.x, pose.position.y])
    plt.scatter(positions[:, 0], positions[:, 1], c=colormap, zorder=1000)

    plt.xlim([surface_coords[0] - 1., surface_coords[0] + surface_width + 1.])
    plt.ylim([surface_coords[1] - 1., surface_coords[1] + surface_height + 1.])
    plt.show()


if __name__ == '__main__':
    robot_pose = Pose3(frame_id='odom_combined',
                       position=Vector3(0.85, 0., 0.),
                       orientation=Vector3(0., 0., 0.))

    model = Proximity(frame_id=obj_config.frame_id,
                      manipulated_object=obj_config.manipulated_obj,
                      objects_on_surface=obj_config.static_objs,
                      surface=obj_config.surface,
                      robot_pose=robot_pose)

    number_of_samples = 1000
    model_results = model.generate_data(number_of_samples)
    poses = model_results['candidate_poses']
    probs = model_results['success_probabilities']
    plot_points(obj_config.surface, obj_config.static_objs, poses, probs)

    timestamp = int(round(time.time() * 1000))
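The `cmap_factor = 100.` scaling above implicitly assumes around a thousand near-uniform probabilities, so that the scaled values land in the [0, 1] range the colormap expects. A scale-independent alternative, sketched here, normalises by the largest probability instead:

    colors = plt.cm.RdYlGn(probs / np.max(probs))[:, 0:3]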
Example #10
 def __init__(self, min_values=Vector3(), max_values=Vector3()):
     '''Keyword arguments:

     min_values -- an 'action_execution.geometry.vector.Vector3' object with the minimum coordinates
     max_values -- an 'action_execution.geometry.vector.Vector3' object with the maximum coordinates

     '''
     self.min = deepcopy(min_values)
     self.max = deepcopy(max_values)
Example #11
 def from_dict(obj_dict):
     obj = BBox3()
     obj.min = Vector3.from_dict(obj_dict['min'])
     obj.max = Vector3.from_dict(obj_dict['max'])
     return obj
Example #12
    along with action-execution. If not, see <http://www.gnu.org/licenses/>.
'''

from action_execution.geometry.vector import Vector3
from action_execution.geometry.bbox import BBox3
from action_execution.geometry.pose import Pose3
from action_execution.geometry.object import Object3d
from action_execution.geometry.plane import Plane
from action_execution.utils.configuration import Configuration

obj_config = Configuration()
obj_config.frame_id = 'odom'

static_obj1 = Object3d(obj_type='ketchup_bottle',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-0.75, -0.25, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-0.789, -0.289, 0.75),
                                  Vector3(-0.711, -0.211, 0.958)))
static_obj2 = Object3d(obj_type='ketchup_bottle',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-1., -0.1, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-1.039, -0.139, 0.75),
                                  Vector3(-0.961, -0.061, 0.958)))
static_obj3 = Object3d(obj_type='book',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-1., 0.5, 0.75),
                                  orientation=Vector3(0., 0., 1.57)),
                       bbox=BBox3(Vector3(-1.225, 0.365, 0.75),
                                  Vector3(-0.775, 0.635, 0.798)))
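Hand-written bounding boxes like these are easy to get wrong (e.g. swapped minimum and maximum components), so a small sanity check such as this sketch can be useful:

    def check_bbox(bbox):
        '''Raises a ValueError if any minimum component of the bounding
        box exceeds the corresponding maximum component.
        '''
        for axis in ('x', 'y', 'z'):
            if getattr(bbox.min, axis) > getattr(bbox.max, axis):
                raise ValueError('bbox min.{0} > max.{0}'.format(axis))

    for static_obj in (static_obj1, static_obj2, static_obj3):
        check_bbox(static_obj.bbox)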
Example #13
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        A dictionary with two entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a 'numpy.ndarray' with one success probability per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we keep samples whose nearest object has the same type as the
        # manipulated object, i.e. we reject samples whose nearest object
        # has a different type
        collected_samples = 0
        distances = np.zeros(number_of_samples)
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min, self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            position = Vector3(position.x,
                               position.y,
                               self.surface.pose.position.z)
            orientation = Vector3(obj_copy.pose.orientation.x,
                                  obj_copy.pose.orientation.y,
                                  z_orientation)

            obj_distances = np.zeros(len(self.objects))
            for i, obj in enumerate(self.objects):
                obj_distances[i] = position.distance(obj.pose.position)
            min_dist_idx = np.argmin(obj_distances)

            # TODO: use a knowledge base to replace the equality check with a
            # broader test about the object categories
            if self.object_types[min_dist_idx] == self.manipulated_object.type:
                pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
                candidate_poses.append(pose)
                distances[collected_samples] = obj_distances[min_dist_idx]
                collected_samples += 1

        # we value closer poses higher than farther ones;
        # the results are treated as success probabilities,
        # though the values actually encode preference
        success_probabilities = np.max(distances) / distances
        success_probabilities /= np.sum(success_probabilities)

        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
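The TODO above could be addressed with an explicit category mapping; the mapping and helper in this sketch are invented for illustration and are not part of action-execution:

    # hypothetical mapping from concrete object types to categories
    OBJECT_CATEGORIES = {'ketchup_bottle': 'container',
                         'book': 'printed_media'}

    def same_category(type_1, type_2):
        '''Returns True if the two object types belong to the same
        category; unknown types fall back to an equality check.
        '''
        return (OBJECT_CATEGORIES.get(type_1, type_1)
                == OBJECT_CATEGORIES.get(type_2, type_2))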
Example #14
    handle_size_x = np.mean(handle_bb_maxs[:, 0] - handle_bb_mins[:, 0])
    handle_size_y = np.mean(handle_bb_maxs[:, 1] - handle_bb_mins[:, 1])
    handle_size_z = np.mean(handle_bb_maxs[:, 2] - handle_bb_mins[:, 2])
    failure_search_params = FailureSearchParams(parameter_stds=np.array([handle_size_x / 10,
                                                                         handle_size_y / 10,
                                                                         handle_size_z / 10]),
                                                max_sample_count=200,
                                                range_increase_percentage=5)

    print('Diagnosing failed executions and suggesting corrections...')
    original_goal_positions = []
    alternative_experiences = []
    for i in range(goal_positions.shape[0]):
        # corrections are only sought for failed executions
        # (label 1 presumably marks a successful execution)
        if labels[i] == 1:
            continue

        gripper_position = Vector3(x=goal_positions[i, 0],
                                   y=goal_positions[i, 1],
                                   z=goal_positions[i, 2])
        gripper_orientation = Vector3(x=goal_orientations[i, 0],
                                      y=goal_orientations[i, 1],
                                      z=goal_orientations[i, 2])

        gripper_pose = Pose3(position=gripper_position,
                             orientation=gripper_orientation)

        bb_min = Vector3(x=handle_bb_mins[i, 0],
                         y=handle_bb_mins[i, 1],
                         z=handle_bb_mins[i, 2])
        bb_max = Vector3(x=handle_bb_maxs[i, 0],
                         y=handle_bb_maxs[i, 1],
                         z=handle_bb_maxs[i, 2])