Example #1
    def extract_predicate_values(self, data_point_count: int,
                                 handle_positions: np.ndarray,
                                 goal_positions: np.ndarray,
                                 motion_durations: np.ndarray) -> np.ndarray:
        predicate_values = np.zeros(
            (data_point_count,
             MetaPredicateData.get_predicate_count(PullingPredicateLibrary) +
             1),
            dtype=int)
        predicates = MetaPredicateData.get_predicates(PullingPredicateLibrary)
        for i in range(data_point_count):
            handle_position = Vector3(x=handle_positions[i, 0],
                                      y=handle_positions[i, 1],
                                      z=handle_positions[i, 2])
            handle_orientation = Vector3(x=0., y=0., z=0.)
            handle_pose = Pose3(position=handle_position,
                                orientation=handle_orientation)

            goal_position = Vector3(x=goal_positions[i, 0],
                                    y=goal_positions[i, 1],
                                    z=goal_positions[i, 2])
            goal_orientation = Vector3(x=0., y=0., z=0.)
            goal_pose = Pose3(position=goal_position,
                              orientation=goal_orientation)

            for j, p in enumerate(predicates):
                predicate_values[i, j] = p(handle_pose, goal_pose)

            predicate_values[i, -1] = PullingPredicateLibrary.qualitative_time(
                motion_durations[i])
        return predicate_values
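
A minimal usage sketch for the extractor above (the extractor instance and the data shapes are assumptions: one xyz row per data point, one duration per motion):

    import numpy as np

    n = 10
    handle_positions = np.random.uniform(-1., 1., (n, 3))
    goal_positions = np.random.uniform(-1., 1., (n, 3))
    motion_durations = np.random.uniform(0.5, 5., n)

    # one row per data point: one column per predicate in
    # PullingPredicateLibrary plus a final qualitative-time column
    values = extractor.extract_predicate_values(n, handle_positions,
                                                goal_positions, motion_durations)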
Example #2
    def extract_predicate_values(self, data_point_count: int,
                                 goal_positions: np.ndarray,
                                 goal_orientations: np.ndarray,
                                 handle_bb_mins: np.ndarray,
                                 handle_bb_maxs: np.ndarray) -> np.ndarray:
        predicate_values = np.zeros(
            (data_point_count,
             MetaPredicateData.get_predicate_count(GraspingPredicateLibrary)),
            dtype=int)
        predicates = MetaPredicateData.get_predicates(GraspingPredicateLibrary)
        for i in range(data_point_count):
            gripper_position = Vector3(x=goal_positions[i, 0],
                                       y=goal_positions[i, 1],
                                       z=goal_positions[i, 2])
            gripper_orientation = Vector3(x=goal_orientations[i, 0],
                                          y=goal_orientations[i, 1],
                                          z=goal_orientations[i, 2])
            gripper_pose = Pose3(position=gripper_position,
                                 orientation=gripper_orientation)

            bb_min = Vector3(x=handle_bb_mins[i, 0],
                             y=handle_bb_mins[i, 1],
                             z=handle_bb_mins[i, 2])
            bb_max = Vector3(x=handle_bb_maxs[i, 0],
                             y=handle_bb_maxs[i, 1],
                             z=handle_bb_maxs[i, 2])
            handle_bbox = BBox3(min_values=bb_min, max_values=bb_max)

            for j, p in enumerate(predicates):
                predicate_values[i, j] = p(gripper_pose, handle_bbox)
        return predicate_values
Example #3
    def from_dict(obj_dict):
        obj = Object3d()
        obj.id = obj_dict['id']
        obj.type = obj_dict['type']
        obj.pose = Pose3.from_dict(obj_dict['pose'])
        obj.bbox = BBox3.from_dict(obj_dict['bounding_box'])
        return obj
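
A hedged round-trip sketch for from_dict; the nested layouts of the 'pose' and 'bounding_box' entries are assumptions about what Pose3.from_dict and BBox3.from_dict expect:

    obj = Object3d.from_dict({
        'id': 'obj_01',
        'type': 'ketchup_bottle',
        'pose': {'frame_id': 'odom',
                 'position': {'x': -0.75, 'y': -0.25, 'z': 0.75},
                 'orientation': {'x': 0., 'y': 0., 'z': 0.}},
        'bounding_box': {'min': {'x': -0.789, 'y': -0.289, 'z': 0.75},
                         'max': {'x': -0.711, 'y': -0.211, 'z': 0.958}}})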
Example #4
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        a dictionary with the entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a numpy array with one normalized weight per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we generate samples in the free space on the surface, i.e.
        # we ignore samples that cause collisions with the other objects
        collected_samples = 0
        distances = np.zeros(number_of_samples)
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min, self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            # we check whether the object would collide with any of the
            # other objects in the newly generated pose
            manipulated_object_polygon = obj_copy.get_z_projection()
            collision = False
            for obj in object_polygons:
                if obj.intersects(manipulated_object_polygon):
                    collision = True
                    break

            # we take the generated pose as a valid candidate if the
            # object doesn't collide with any of the objects on the surface
            if not collision:
                position = Vector3(position.x,
                                   position.y,
                                   self.surface.pose.position.z)
                orientation = Vector3(obj_copy.pose.orientation.x,
                                      obj_copy.pose.orientation.y,
                                      z_orientation)
                pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
                candidate_poses.append(pose)

                distances[collected_samples] = position.distance(self.robot_pose.position)
                collected_samples += 1

        # we value closer poses higher than farther ones;
        # the results are treated as success probabilities,
        # though the values actually encode preference
        success_probabilities = np.max(distances) / distances
        success_probabilities /= np.sum(success_probabilities)

        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
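
The inverse-distance weighting at the end is easy to check by hand; a worked example with three sampled distances:

    import numpy as np

    distances = np.array([1., 2., 4.])
    weights = np.max(distances) / distances   # -> [4., 2., 1.]
    weights /= np.sum(weights)                # -> [4/7, 2/7, 1/7]
    # the closest pose receives the largest normalized weight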
Example #5
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        a dictionary with the entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a numpy array with one normalized weight per pose

        '''
        candidate_poses = list()

        # we generate samples over the target object
        collected_samples = 0
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random 2D position over the target object
            position = Vector2.random_vector(self.target_object.bbox.min,
                                             self.target_object.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            position = Vector3(position.x,
                               position.y,
                               self.target_object.bbox.max.z)
            orientation = Vector3(obj_copy.pose.orientation.x,
                                  obj_copy.pose.orientation.y,
                                  z_orientation)
            pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
            candidate_poses.append(pose)
            collected_samples += 1

        # all candidates are considered equally likely
        success_probabilities = np.ones(number_of_samples) / (number_of_samples * 1.)
        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
Example #6
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        a dictionary with the entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a numpy array with one normalized weight per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we generate samples in the free space on the surface, i.e.
        # we ignore samples that cause collisions with the other objects
        collected_samples = 0
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min,
                                             self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2. * np.pi)
            obj_copy.rotate_around_z(z_orientation)

            # we check if the object would collide with any of the other
            # objects in the newly generated pose
            manipulated_object_polygon = obj_copy.get_z_projection()
            collision = False
            for obj in object_polygons:
                if obj.intersects(manipulated_object_polygon):
                    collision = True
                    break

            # we take the generated pose as a valid candidate if the
            # object doesn't collide with any of the objects on the surface
            if not collision:
                position = Vector3(position.x, position.y,
                                   self.surface.pose.position.z)
                orientation = Vector3(obj_copy.pose.orientation.x,
                                      obj_copy.pose.orientation.y,
                                      z_orientation)
                pose = Pose3(self.manipulated_object.pose.frame_id, position,
                             orientation)
                candidate_poses.append(pose)
                collected_samples += 1

        # all candidates are considered equally likely
        success_probabilities = np.ones(number_of_samples) / (
            number_of_samples * 1.)
        return {
            'candidate_poses': candidate_poses,
            'success_probabilities': success_probabilities
        }
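
The rejection loop above only advances on collision-free draws, so a crowded surface can stall it indefinitely. A self-contained sketch of the same idea with an attempt cap (shapely is an assumption for the polygon type returned by get_z_projection):

    import numpy as np
    from shapely.geometry import box

    obstacles = [box(0., 0., 0.4, 0.4), box(0.6, 0.6, 1., 1.)]
    samples, attempts, max_attempts = [], 0, 10000
    while len(samples) < 10 and attempts < max_attempts:
        attempts += 1
        x, y = np.random.uniform(0., 1., 2)
        # a square footprint standing in for the object's z-projection
        footprint = box(x - 0.05, y - 0.05, x + 0.05, y + 0.05)
        if not any(o.intersects(footprint) for o in obstacles):
            samples.append((x, y))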
Example #7
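    # probs sum to 1, so individual values are small; the scaling makes better
    # use of the colormap's [0, 1] input range (float inputs above 1 are clipped)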
    cmap_factor = 100.
    colormap = plt.cm.RdYlGn(probs * cmap_factor)[:, 0:3]
    positions = np.zeros((len(poses), 2))
    for i, pose in enumerate(poses):
        positions[i] = np.array([pose.position.x, pose.position.y])
    plt.scatter(positions[:, 0], positions[:, 1], c=colormap, zorder=1000)

    plt.xlim([surface_coords[0] - 1., surface_coords[0] + surface_width + 1.])
    plt.ylim([surface_coords[1] - 1., surface_coords[1] + surface_height + 1.])
    plt.show()


if __name__ == '__main__':
    robot_pose = Pose3(frame_id='odom_combined',
                       position=Vector3(0.85, 0., 0.),
                       orientation=Vector3(0., 0., 0.))

    model = Proximity(frame_id=obj_config.frame_id,
                      manipulated_object=obj_config.manipulated_obj,
                      objects_on_surface=obj_config.static_objs,
                      surface=obj_config.surface,
                      robot_pose=robot_pose)

    number_of_samples = 1000
    model_results = model.generate_data(number_of_samples)
    poses = model_results['candidate_poses']
    probs = model_results['success_probabilities']
    plot_points(obj_config.surface, obj_config.static_objs, poses, probs)

    timestamp = int(round(time.time() * 1000))    # milliseconds since the epoch
Example #8
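    # with uniform probabilities of 1/len(probs), this scaling maps every
    # sample to 1.0, i.e. to the top of the colormap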
    cmap_factor = len(probs)
    colormap = plt.cm.RdYlGn(probs * cmap_factor)[:, 0:3]
    positions = np.zeros((len(poses), 2))
    for i, pose in enumerate(poses):
        positions[i] = np.array([pose.position.x, pose.position.y])
    plt.scatter(positions[:, 0], positions[:, 1], c=colormap, zorder=1000)

    plt.xlim([surface_coords[0] - 1., surface_coords[0] + surface_width + 1.])
    plt.ylim([surface_coords[1] - 1., surface_coords[1] + surface_height + 1.])
    plt.show()


if __name__ == '__main__':
    robot_pose = Pose3(frame_id='odom',
                       position=Vector3(0., 0., 0.),
                       orientation=Vector3(0., 0., 0.))

    arm_name = 'arm'
    action_model = Action(action_name='Release')

    print()
    print('Action config')
    print('-------------')
    action_model.print_config()

    number_of_samples = 10
    results = action_model.get_execution_guidelines(
        data_count=number_of_samples,
        frame_id=obj_config.frame_id,
        manipulated_object=obj_config.manipulated_obj,
Example #9
    def from_dict(obj_dict):
        obj = Plane()
        obj.id = obj_dict['id']
        obj.pose = Pose3.from_dict(obj_dict['pose'])
        obj.bbox = BBox3.from_dict(obj_dict['bounding_box'])
        return obj
Example #10
    def __init__(self, plane_id='', pose=Pose3(), bbox=BBox3()):
        self.id = plane_id
        self.pose = deepcopy(pose)
        self.bbox = deepcopy(bbox)
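
A note on the deepcopy calls: the default Pose3() and BBox3() arguments are created once, when the function is defined, so without copying every Plane built with defaults would share the same objects. A small demo (assuming Pose3 defaults to a zero position):

    p1 = Plane()
    p2 = Plane()
    p1.pose.position.x = 5.    # mutates only p1's copy
    assert p2.pose.position.x == 0.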
Example #11
    along with action-execution. If not, see <http://www.gnu.org/licenses/>.
'''

from action_execution.geometry.vector import Vector3
from action_execution.geometry.bbox import BBox3
from action_execution.geometry.pose import Pose3
from action_execution.geometry.object import Object3d
from action_execution.geometry.plane import Plane
from action_execution.utils.configuration import Configuration

obj_config = Configuration()
obj_config.frame_id = 'odom'

static_obj1 = Object3d(obj_type='ketchup_bottle',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-0.75, -0.25, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-0.789, -0.289, 0.75),
                                  Vector3(-0.711, -0.211, 0.958)))
static_obj2 = Object3d(obj_type='ketchup_bottle',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-1., -0.1, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-1.039, -0.139, 0.75),
                                  Vector3(-0.961, -0.061, 0.958)))
static_obj3 = Object3d(obj_type='book',
                       pose=Pose3(frame_id='odom',
                                  position=Vector3(-1., 0.5, 0.75),
                                  orientation=Vector3(0., 0., 1.57)),
                       bbox=BBox3(Vector3(-1.225, 0.365, 0.75),
                                  Vector3(-0.775, 0.635, 0.798)))
Example #12
    You should have received a copy of the GNU General Public License
    along with action-execution. If not, see <http://www.gnu.org/licenses/>.
'''

from action_execution.geometry.vector import Vector3
from action_execution.geometry.bbox import BBox3
from action_execution.geometry.pose import Pose3
from action_execution.geometry.object import Object3d
from action_execution.geometry.plane import Plane
from action_execution.utils.configuration import Configuration

obj_config = Configuration()
obj_config.frame_id = 'odom'

static_obj1 = Object3d(pose=Pose3(frame_id='odom',
                                  position=Vector3(-0.75, -0.25, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-0.789, -0.289, 0.75),
                                  Vector3(-0.711, -0.211, 0.958)))
static_obj2 = Object3d(pose=Pose3(frame_id='odom',
                                  position=Vector3(-0.75, 0.05, 0.75),
                                  orientation=Vector3(0., 0., 0.)),
                       bbox=BBox3(Vector3(-0.789, 0.011, 0.75),
                                  Vector3(-0.711, 0.089, 0.958)))
obj_config.static_objs = [static_obj1, static_obj2]

obj_config.manipulated_obj = Object3d(
    pose=Pose3(frame_id='odom',
               position=Vector3(-0.126, 0.190, 0.851),
               orientation=Vector3(0., 0., 0.)),
    bbox=BBox3(Vector3(-0.164, 0.151, 0.849), Vector3(-0.087, 0.229, 1.057)))
Example #13
    def generate_data(self, number_of_samples):
        '''Generates a set of samples

        Keyword arguments:
        number_of_samples -- number of samples to generate

        Returns:
        a dictionary with the entries:
        candidate_poses -- a list of 'action_execution.geometry.pose.Pose3' objects
        success_probabilities -- a numpy array with one normalized weight per pose

        '''
        candidate_poses = list()

        # we get the z-projections of the objects that are already on the surface
        object_polygons = list()
        for obj in self.objects:
            polygon = obj.get_z_projection()
            object_polygons.append(polygon)

        # we keep only samples whose nearest object has the same type as the
        # manipulated object, i.e. we discard samples that end up closest to
        # objects of another type
        collected_samples = 0
        distances = np.zeros(number_of_samples)
        while collected_samples < number_of_samples:
            obj_copy = deepcopy(self.manipulated_object)

            # we generate a random position on the surface
            position = Vector2.random_vector(self.surface.bbox.min, self.surface.bbox.max)
            obj_copy.planar_translate_to(position)

            # we generate a random orientation around z;
            # we don't rotate the object around the other axes
            z_orientation = np.random.uniform(0., 2.*np.pi)
            obj_copy.rotate_around_z(z_orientation)

            position = Vector3(position.x,
                               position.y,
                               self.surface.pose.position.z)
            orientation = Vector3(obj_copy.pose.orientation.x,
                                  obj_copy.pose.orientation.y,
                                  z_orientation)

            obj_distances = np.zeros(len(self.objects))
            for i, obj in enumerate(self.objects):
                obj_distances[i] = position.distance(obj.pose.position)
            min_dist_idx = np.argmin(obj_distances)

            # TODO: use a knowledge base to replace the equality check with a
            # broader test about the object categories
            if self.object_types[min_dist_idx] == self.manipulated_object.type:
                pose = Pose3(self.manipulated_object.pose.frame_id, position, orientation)
                candidate_poses.append(pose)
                distances[collected_samples] = obj_distances[min_dist_idx]
                collected_samples += 1

        # we value closer poses higher than farther ones;
        # the results are treated as success probabilities,
        # though the values actually encode preference
        success_probabilities = np.max(distances) / distances
        success_probabilities /= np.sum(success_probabilities)

        return {'candidate_poses': candidate_poses,
                'success_probabilities': success_probabilities}
Example #14
    def __init__(self, obj_id='', obj_type='', pose=Pose3(), bbox=BBox3()):
        self.id = obj_id
        self.type = obj_type
        self.pose = deepcopy(pose)
        self.bbox = deepcopy(bbox)
Example #15
                                                range_increase_percentage=5)

    print('Diagnosing failed executions and suggesting corrections...')
    original_goal_positions = []
    alternative_experiences = []
    for i in range(goal_positions.shape[0]):
        # only failed executions are diagnosed; successes are skipped
        if labels[i] == 1:
            continue

        gripper_position = Vector3(x=goal_positions[i, 0],
                                   y=goal_positions[i, 1],
                                   z=goal_positions[i, 2])
        gripper_orientation = Vector3(x=goal_orientations[i, 0],
                                      y=goal_orientations[i, 1],
                                      z=goal_orientations[i, 2])

        gripper_pose = Pose3(position=gripper_position,
                             orientation=gripper_orientation)

        bb_min = Vector3(x=handle_bb_mins[i, 0],
                         y=handle_bb_mins[i, 1],
                         z=handle_bb_mins[i, 2])
        bb_max = Vector3(x=handle_bb_maxs[i, 0],
                         y=handle_bb_maxs[i, 1],
                         z=handle_bb_maxs[i, 2])

        handle_bbox = BBox3(min_values=bb_min,
                            max_values=bb_max)

        diagnoses, experience_correction = execution_model.correct_failed_experience(
            predicate_library=GraspingPredicateLibrary,
            state_update_fn=state_update_fn,
            failure_search_params=failure_search_params,
            diagnosis_repetition_count=50,