Example #1
    def from_feature_vec(v,
                         camera_intr=None,
                         angle=None,
                         depth=None,
                         axis=None):
        """Creates a `SuctionPoint2D` instance from a feature vector and
        additional parameters.

        Parameters
        ----------
        v : :obj:`numpy.ndarray`
            Feature vector, see `Grasp2D.feature_vec`.
        camera_intr : :obj:`perception.CameraIntrinsics`
            Intrinsics of the camera that the grasp corresponds to.
        angle : float
            Hard-set the rotation angle about the approach axis, in radians
            (overrides any direction encoded in `v`).
        depth : float
            Hard-set the depth for the suction grasp.
        axis : :obj:`numpy.ndarray`
            Normalized 3-vector specifying the approach direction.
        """
        # Read feature vec.
        center_px = v[:2]

        grasp_angle = 0
        if v.shape[0] > 2 and angle is None:
            # grasp_angle = v[2]
            grasp_vec = v[2:]
            grasp_vec = grasp_vec / np.linalg.norm(grasp_vec)
            grasp_angle = np.arctan2(grasp_vec[1], grasp_vec[0])
        elif angle is not None:
            grasp_angle = angle

        grasp_axis = np.array([1, 0, 0])
        if axis is not None:
            grasp_axis = axis

        grasp_depth = 0.5
        if depth is not None:
            grasp_depth = depth

        x_axis = grasp_axis
        y_axis = np.array([grasp_axis[1], -grasp_axis[0], 0])
        if np.linalg.norm(y_axis) == 0:
            y_axis = np.array([1, 0, 0])
        y_axis = y_axis / np.linalg.norm(y_axis)
        z_axis = np.cross(x_axis, y_axis)

        R = np.array([x_axis, y_axis, z_axis]).T
        R = R.dot(RigidTransform.x_axis_rotation(grasp_angle))
        t = camera_intr.deproject_pixel(
            grasp_depth, Point(center_px, frame=camera_intr.frame)).data
        T = RigidTransform(rotation=R,
                           translation=t,
                           from_frame="grasp",
                           to_frame=camera_intr.frame)

        # Return the suction point defined by T in the camera frame.
        return MultiSuctionPoint2D(T, camera_intr=camera_intr)
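The frame construction above is the core pattern in this example: the approach axis becomes the grasp x-axis, an arbitrary perpendicular becomes y, and `x_axis_rotation` then spins the gripper about the approach direction. Below is a minimal standalone sketch of that step, assuming only numpy and autolab_core's `RigidTransform`; the axis and angle values are made up for illustration.

import numpy as np
from autolab_core import RigidTransform

def grasp_rotation_from_axis(axis, angle):
    # Build a 3x3 rotation whose x-axis is `axis`, then rotate by `angle` about it.
    x_axis = axis / np.linalg.norm(axis)
    y_axis = np.array([x_axis[1], -x_axis[0], 0.0])
    if np.linalg.norm(y_axis) == 0:
        # Axis is along +/-z, so any perpendicular vector works.
        y_axis = np.array([1.0, 0.0, 0.0])
    y_axis = y_axis / np.linalg.norm(y_axis)
    z_axis = np.cross(x_axis, y_axis)
    R = np.array([x_axis, y_axis, z_axis]).T  # columns are the new frame axes
    return R.dot(RigidTransform.x_axis_rotation(angle))

R = grasp_rotation_from_axis(np.array([0.0, 0.0, -1.0]), np.pi / 6)
print(np.allclose(R.T.dot(R), np.eye(3)))  # True: the result is orthonormal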
Example #2
    def turn_handle(self, visualize=False):
        start_pose = ut.get_link_pose(self.robot, self.grasp_joint)
        start_quat = start_pose[1]
        amount_rotate = np.pi / 4.
        rot_x = RigidTransform.x_axis_rotation(amount_rotate)
        end_rot = np.dot(rot_x, Quaternion(start_quat).rotation_matrix)
        end_quat = Quaternion(matrix=end_rot).elements
        step_size = np.pi / 16.
        quat_waypoints = ut.get_quaternion_waypoints(start_pose, start_quat,
                                                     end_quat)
        theta = np.pi
        end_theta = np.pi + amount_rotate
        n_waypoints = int(np.round((end_theta - theta) / step_size))
        thetas = np.linspace(theta, end_theta, n_waypoints)
        r = 0.10
        center = self.door_knob_center.copy()
        center[1] -= 0.055  #franka_tool to 9/10 pose
        pos_traj = []
        for theta, quat in zip(thetas, quat_waypoints):
            new_pt = (center[0] + r * np.cos(theta), center[1],
                      center[2] + r * np.sin(theta))
            pos_traj.append((new_pt, quat[1]))
            sph = ut.create_sphere(0.025)
            ut.set_point(sph, new_pt)
        joints_to_plan_for = ut.get_movable_joints(self.robot)[:-2]
        traj = []
        for pos in pos_traj:
            new_jts = ut.inverse_kinematics(
                self.robot,
                self.grasp_joint,
                pos,
                movable_joints=joints_to_plan_for,
                null_space=self.get_null_space(joints_to_plan_for),
                ori_tolerance=0.02)
            if new_jts is not None:
                traj.append(new_jts)
            else:
                new_jts = ut.inverse_kinematics(
                    self.robot,
                    self.grasp_joint,
                    pos,
                    movable_joints=joints_to_plan_for,
                    null_space=None,
                    ori_tolerance=0.03)
                if new_jts is None:
                    print("no jt solution found")
                else:
                    traj.append(new_jts)

        assert (len(traj) > 0)
        if visualize:
            self.visualize_traj(traj)
        n_pts = min(10, len(traj))
        mask = np.round(np.linspace(0, len(traj) - 1, n_pts)).astype(np.int32)
        traj = np.array(traj)[mask]
        assert (traj is not None)
        return traj
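The waypoint geometry above is a set of points on a circular arc of radius r in the x-z plane around the knob center, with y held fixed. A small numpy sketch of that step, using placeholder values for the center, radius, and sweep:

import numpy as np

center = np.array([0.5, 0.0, 0.3])  # hypothetical knob center
r = 0.10
step_size = np.pi / 16.
theta, end_theta = np.pi, np.pi + np.pi / 4.
n_waypoints = int(np.round((end_theta - theta) / step_size))
for th in np.linspace(theta, end_theta, n_waypoints):
    # Sweep the tool along the arc, keeping y constant.
    pt = (center[0] + r * np.cos(th), center[1], center[2] + r * np.sin(th))
    print(pt)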
Example #3
def straighten_transform(rt):
    angles = rt.euler_angles
    roll_off = angles[0]
    pitch_off = angles[1]
    roll_fix = RigidTransform(
        rotation=RigidTransform.x_axis_rotation(np.pi - roll_off),
        from_frame=rt.from_frame,
        to_frame=rt.from_frame)
    pitch_fix = RigidTransform(
        rotation=RigidTransform.y_axis_rotation(pitch_off),
        from_frame=rt.from_frame,
        to_frame=rt.from_frame)
    new_rt = rt * roll_fix * pitch_fix
    return new_rt
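A hedged usage sketch of straighten_transform (reusing the function above), assuming autolab_core's RigidTransform; the frame names, translation, and roll/pitch offsets are placeholders:

import numpy as np
from autolab_core import RigidTransform

rt = RigidTransform(
    rotation=RigidTransform.x_axis_rotation(0.2).dot(
        RigidTransform.y_axis_rotation(-0.1)),
    translation=np.array([0.4, 0.0, 0.2]),
    from_frame="tool",
    to_frame="world")
print(rt.euler_angles)                         # roll/pitch before
print(straighten_transform(rt).euler_angles)   # roll/pitch after the fix-up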
Example #4
    def _sample_suction_points(self,
                               depth_im,
                               camera_intr,
                               num_samples,
                               segmask=None,
                               visualize=False,
                               constraint_fn=None):
        """Sample a set of 2D suction point candidates from a depth image by
        choosing points on an object surface uniformly at random
        and then sampling around the surface normal.

        Parameters
        ----------
        depth_im : :obj:`perception.DepthImage`
            Depth image to sample from.
        camera_intr : :obj:`perception.CameraIntrinsics`
            Intrinsics of the camera that captured the images.
        num_samples : int
            Number of grasps to sample.
        segmask : :obj:`perception.BinaryImage`
            Binary image segmenting out the object of interest.
        visualize : bool
            Whether or not to show intermediate samples (for debugging).
        constraint_fn : :obj:`GraspConstraintFn`
            Constraint function to apply to grasps.

        Returns
        -------
        :obj:`list` of :obj:`SuctionPoint2D`
            List of 2D suction point candidates.
        """
        # Smooth the depth image and mask out everything but the object.
        filter_start = time()
        if self._depth_gaussian_sigma > 0:
            depth_im_mask = depth_im.apply(snf.gaussian_filter,
                                           sigma=self._depth_gaussian_sigma)
        else:
            depth_im_mask = depth_im.copy()
        if segmask is not None:
            depth_im_mask = depth_im_mask.mask_binary(segmask)
        self._logger.debug("Filtering took %.3f sec" % (time() - filter_start))

        if visualize:
            vis.figure()
            vis.subplot(1, 2, 1)
            vis.imshow(depth_im)
            vis.subplot(1, 2, 2)
            vis.imshow(depth_im_mask)
            vis.show()

        # Project to get the point cloud.
        cloud_start = time()
        point_cloud_im = camera_intr.deproject_to_image(depth_im_mask)
        normal_cloud_im = point_cloud_im.normal_cloud_im()
        nonzero_px = depth_im_mask.nonzero_pixels()
        num_nonzero_px = nonzero_px.shape[0]
        if num_nonzero_px == 0:
            return []
        self._logger.debug("Normal cloud took %.3f sec" %
                           (time() - cloud_start))

        # Randomly sample points and add to image.
        sample_start = time()
        suction_points = []
        k = 0
        sample_size = min(self._max_num_samples, num_nonzero_px)
        indices = np.random.choice(num_nonzero_px,
                                   size=sample_size,
                                   replace=False)
        while k < sample_size and len(suction_points) < num_samples:
            # Sample a point uniformly at random.
            ind = indices[k]
            center_px = np.array([nonzero_px[ind, 1], nonzero_px[ind, 0]])
            center = Point(center_px, frame=camera_intr.frame)
            axis = -normal_cloud_im[center.y, center.x]
            #            depth = point_cloud_im[center.y, center.x][2]
            orientation = 2 * np.pi * np.random.rand()

            # Update number of tries.
            k += 1

            # Skip bad axes.
            if np.linalg.norm(axis) == 0:
                continue

            # Rotation matrix.
            x_axis = axis
            y_axis = np.array([axis[1], -axis[0], 0])
            if np.linalg.norm(y_axis) == 0:
                y_axis = np.array([1, 0, 0])
            y_axis = y_axis / np.linalg.norm(y_axis)
            z_axis = np.cross(x_axis, y_axis)
            R = np.array([x_axis, y_axis, z_axis]).T
            #            R_orig = np.copy(R)
            R = R.dot(RigidTransform.x_axis_rotation(orientation))
            t = point_cloud_im[center.y, center.x]
            pose = RigidTransform(rotation=R,
                                  translation=t,
                                  from_frame="grasp",
                                  to_frame=camera_intr.frame)

            # Check center px dist from boundary.
            if (center_px[0] < self._min_dist_from_boundary
                    or center_px[1] < self._min_dist_from_boundary
                    or center_px[1] >
                    depth_im.height - self._min_dist_from_boundary
                    or center_px[0] >
                    depth_im.width - self._min_dist_from_boundary):
                continue

            # Keep if the angle between the camera optical axis and the suction
            # direction is less than a threshold.
            dot = max(min(axis.dot(np.array([0, 0, 1])), 1.0), -1.0)
            psi = np.arccos(dot)
            if psi < self._max_suction_dir_optical_axis_angle:

                # Create the candidate suction point.
                candidate = MultiSuctionPoint2D(pose, camera_intr=camera_intr)

                # Check constraint satisfaction.
                if constraint_fn is None or constraint_fn(candidate):
                    if visualize:
                        vis.figure()
                        vis.imshow(depth_im)
                        vis.scatter(center.x, center.y)
                        vis.show()

                    suction_points.append(candidate)
        self._logger.debug("Loop took %.3f sec" % (time() - sample_start))
        return suction_points
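The acceptance test near the end of the sampling loop keeps a candidate only if the angle between its approach axis and the camera optical axis ([0, 0, 1] in the camera frame) is below a threshold. A standalone numpy sketch of that check; the threshold and axis values are made up:

import numpy as np

max_angle = np.deg2rad(45.0)  # hypothetical threshold
axis = np.array([0.1, -0.2, 0.97])
axis = axis / np.linalg.norm(axis)
# Clamp the dot product so arccos never sees a value outside [-1, 1].
dot = max(min(axis.dot(np.array([0.0, 0.0, 1.0])), 1.0), -1.0)
psi = np.arccos(dot)
print(psi < max_angle)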
Example #5
def process_frame(frame_json, camera_intrinsics, models):
    num_objects = len(frame_json['objects'])
    if num_objects == 0:
        print("no objects in frame!")
        return frame_json

    # copy relevant fields of json
    updated_frame_json = {'camera_data': {}, 'objects': []}
    if frame_json['camera_data']:
        updated_frame_json['camera_data'][
            'location_worldframe'] = copy.deepcopy(
                frame_json['camera_data']['location_worldframe'])
        updated_frame_json['camera_data'][
            'quaternion_xyzw_worldframe'] = copy.deepcopy(
                frame_json['camera_data']['quaternion_xyzw_worldframe'])
    else:
        print('Warning: no `camera_data` field!')

    for i in range(num_objects):
        updated_frame_json['objects'].append({})
        for key in ('class', 'bounding_box', 'instance_id', 'visibility'):
            updated_frame_json['objects'][i][key] = copy.deepcopy(
                frame_json['objects'][i][key])

    # get object poses and flip symmetrical objects if necessary
    object_poses = []
    for object_index in range(num_objects):
        model_name = updated_frame_json['objects'][object_index]['class']
        scene_object_json = frame_json['objects'][object_index]
        (translation, rotation) = object_pose_from_json(scene_object_json)
        object_pose = RigidTransform(rotation=rotation,
                                     translation=translation,
                                     from_frame='source_model',
                                     to_frame='camera')
        object_pose = object_pose.dot(models[model_name].model_transform)

        # Handle symmetrical objects: if z axis (= x axis in mesh) points towards camera,
        # rotate by 180 degrees around x (= z axis in mesh).
        # This always keeps the "green/yellow" short side (in nvdu_viz) pointed towards
        # the camera; the "blue/magenta" short side should never be visible. This is necessary
        # for DOPE training, since both sides look the same because the KLT box is symmetrical.
        symmetry_flip_tf = RigidTransform(from_frame='target_model',
                                          to_frame='target_model_original')
        z_axis = rotation.dot(np.array([0.0, 0.0, 1.0], dtype=np.float64))
        if z_axis.dot(translation) < 0:
            symmetry_flip_tf.rotation = RigidTransform.x_axis_rotation(math.pi)
        object_pose = object_pose.dot(symmetry_flip_tf)

        object_poses.append(object_pose)

        updated_frame_json['objects'][object_index][
            'location'] = object_pose.translation.tolist()
        updated_frame_json['objects'][object_index][
            'quaternion_xyzw'] = np.roll(object_pose.quaternion, -1).tolist()

    # add pose_transform_permuted
    for object_index in range(num_objects):
        ptp_json = pose_transform_permuted_to_json(
            object_poses[object_index].translation,
            object_poses[object_index].rotation)
        updated_frame_json['objects'][object_index].update(ptp_json)

    # compute cuboid, cuboid_centroid, projected_cuboid, projected_cuboid_centroid, bounding_box
    for object_index in range(num_objects):
        model_name = updated_frame_json['objects'][object_index]['class']
        cuboid_json = get_cuboid(object_poses[object_index],
                                 models[model_name].cuboid_dimensions,
                                 camera_intrinsics)
        updated_frame_json['objects'][object_index].update(cuboid_json)

    return updated_frame_json
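The symmetry handling described in the comments above reduces to checking whether the object's z-axis, expressed in the camera frame, points back towards the camera, and appending a 180-degree rotation about x when it does. A minimal sketch of that decision, assuming autolab_core's RigidTransform; the pose values are placeholders:

import numpy as np
from autolab_core import RigidTransform

rotation = RigidTransform.y_axis_rotation(0.75 * np.pi)  # example object orientation
translation = np.array([0.0, 0.0, 1.0])                  # example object position
object_pose = RigidTransform(rotation=rotation,
                             translation=translation,
                             from_frame='target_model_original',
                             to_frame='camera')

symmetry_flip_tf = RigidTransform(from_frame='target_model',
                                  to_frame='target_model_original')
z_axis = rotation.dot(np.array([0.0, 0.0, 1.0]))
if z_axis.dot(translation) < 0:
    # The z-axis points towards the camera: flip 180 degrees about x.
    symmetry_flip_tf.rotation = RigidTransform.x_axis_rotation(np.pi)
object_pose = object_pose * symmetry_flip_tf
print(object_pose.quaternion)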
Example #6
    print('Opening gripper all the way')
    fa.open_gripper()

    # joint controls
    print('Rotating last joint')
    joints = fa.get_joints()
    joints[6] += np.deg2rad(45)
    fa.goto_joints(joints)
    joints[6] -= np.deg2rad(45)
    fa.goto_joints(joints)

    # end-effector pose control
    print('Translation')
    T_ee_world = fa.get_pose()
    T_ee_world.translation += [0.1, 0, 0.1]
    fa.goto_pose(T_ee_world)
    T_ee_world.translation -= [0.1, 0, 0.1]
    fa.goto_pose(T_ee_world)

    print('Rotation in end-effector frame')
    T_ee_rot = RigidTransform(
        rotation=RigidTransform.x_axis_rotation(np.deg2rad(45)),
        from_frame='franka_tool', to_frame='franka_tool'
    )
    T_ee_world_target = T_ee_world * T_ee_rot
    fa.goto_pose(T_ee_world_target)
    fa.goto_pose(T_ee_world)

    # reset franka back to home
    fa.reset_joints()
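The pose composition above (T_ee_world * T_ee_rot) rotates the end-effector about its own x-axis rather than the world x-axis, because T_ee_rot maps franka_tool to franka_tool and is applied on the right. A short sketch of that pattern with autolab_core's RigidTransform and placeholder values:

import numpy as np
from autolab_core import RigidTransform

T_ee_world = RigidTransform(rotation=RigidTransform.z_axis_rotation(np.pi / 2),
                            translation=np.array([0.4, 0.0, 0.3]),
                            from_frame='franka_tool',
                            to_frame='world')
T_ee_rot = RigidTransform(rotation=RigidTransform.x_axis_rotation(np.deg2rad(45)),
                          from_frame='franka_tool',
                          to_frame='franka_tool')
# Right-multiplying applies the rotation in the tool frame.
T_target = T_ee_world * T_ee_rot
print(T_target.rotation)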