Example #1
    def predict(self):
        rot, trans = RigidTransform.rotation_and_translation_from_matrix(
            self.datapoint['obj_pose'])
        dataset_pose = RigidTransform(rot, trans, 'obj', 'world')
        dataset_final_poses = self.datapoint['final_poses']
        num_vertices = self.datapoint['num_vertices']
        final_poses = []
        for i in range(20):
            final_pose_mat = dataset_final_poses[i * 4:(i + 1) * 4]
            if np.array_equal(final_pose_mat, np.zeros((4, 4))):
                break
            rot, trans = RigidTransform.rotation_and_translation_from_matrix(
                final_pose_mat)
            final_poses.append(RigidTransform(rot, trans, 'obj', 'world'))
        vertices = self.datapoint['vertices'][:num_vertices]
        normals = self.datapoint['normals'][:num_vertices]
        vertices = (self.obj.T_obj_world * dataset_pose.inverse() *
                    PointCloud(vertices.T, 'world')).data.T
        normals = (self.obj.T_obj_world * dataset_pose.inverse() *
                   PointCloud(normals.T, 'world')).data.T

        vertex_probs = self.datapoint[
            'vertex_probs'][:num_vertices, :len(final_poses)]
        required_forces = self.datapoint['min_required_forces'][:num_vertices]

        return vertices, normals, final_poses, vertex_probs, required_forces
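
# Minimal standalone sketch of the frame re-mapping used in predict() above:
# undo the recorded dataset pose, then apply the current object pose.
# Assumes autolab_core and numpy; the poses and vertices are placeholders.
import numpy as np
from autolab_core import PointCloud, RigidTransform

T_obj_world = RigidTransform(rotation=np.eye(3),
                             translation=np.array([0.1, 0.0, 0.0]),
                             from_frame='obj', to_frame='world')
dataset_pose = RigidTransform(rotation=np.eye(3),
                              translation=np.array([0.0, 0.2, 0.0]),
                              from_frame='obj', to_frame='world')
vertices = np.random.rand(100, 3)  # (N, 3) vertices recorded at dataset_pose
# dataset_pose.inverse() maps world -> obj, then T_obj_world maps obj -> world
vertices_world = (T_obj_world * dataset_pose.inverse() *
                  PointCloud(vertices.T, 'world')).data.T
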
    def object_to_camera_pose(self, radius, elev, az, roll):
        """ Convert spherical coords to an object-camera pose. """
        # generate camera center from spherical coords
        camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze()
        camera_z_obj = -camera_center_obj / np.linalg.norm(camera_center_obj)

        # find the canonical camera x and y axes
        camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0])
        if np.linalg.norm(camera_x_par_obj) == 0:
            camera_x_par_obj = np.array([1, 0, 0])
        camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj)
        camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
        camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
        if camera_y_par_obj[2] > 0:
            camera_x_par_obj = -camera_x_par_obj
            camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
            camera_y_par_obj = camera_y_par_obj / np.linalg.norm(
                camera_y_par_obj)

        # rotate by the roll
        R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj,
                                 camera_z_obj]
        R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0],
                                        [np.sin(roll), np.cos(roll), 0],
                                        [0, 0, 1]])
        R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera)
        t_obj_camera = camera_center_obj

        # create final transform
        T_obj_camera = RigidTransform(R_obj_camera,
                                      t_obj_camera,
                                      from_frame=self.frame,
                                      to_frame='obj')
        return T_obj_camera.inverse()
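
# The sph2cart helper is assumed by the snippet above but not shown; a
# plausible implementation (argument order and elevation convention are
# assumptions and may differ from the project's real helper):
import numpy as np

def sph2cart(r, az, elev):
    # elevation measured from the +z axis (physics convention)
    return (r * np.sin(elev) * np.cos(az),
            r * np.sin(elev) * np.sin(az),
            r * np.cos(elev))
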
Example #3
def visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj):
    T_cam_cam = RigidTransform()
    T_obj_cam = T_cam_obj.inverse()
    T_cam_ar = T_ar_cam.inverse()

    Twc = np.matmul(T_world_ar.matrix, T_ar_cam.matrix)
    T_world_cam = RigidTransform(Twc[:3, :3], Twc[:3, 3])
    T_cam_world = T_world_cam.inverse()

    o = np.array([0., 0., 0.])
    centroid_cam = apply_transform(o, T_cam_obj)
    tag_cam = apply_transform(o, T_cam_ar)
    robot_cam = apply_transform(o, T_cam_world)

    # camera
    vis3d.points(o, color=(0, 1, 0), scale=0.01)
    vis3d.pose(T_cam_cam, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # object
    vis3d.mesh(mesh)
    vis3d.points(centroid_cam, color=(0, 0, 0), scale=0.01)
    vis3d.pose(T_cam_obj, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    # AR tag
    vis3d.points(tag_cam, color=(1, 0, 1), scale=0.01)
    vis3d.pose(T_cam_ar, alpha=0.05, tube_radius=0.005, center_scale=0.002)
    vis3d.table(T_cam_ar, dim=0.074)

    # robot
    vis3d.points(robot_cam, color=(1, 0, 0), scale=0.01)
    vis3d.pose(T_cam_world, alpha=0.05, tube_radius=0.005, center_scale=0.002)

    vis3d.show()
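
# apply_transform is not defined in this snippet; a hypothetical version
# consistent with how it is called above (a RigidTransform applied to a bare
# 3-vector):
import numpy as np

def apply_transform(point, T):
    """Apply a RigidTransform T to a length-3 numpy array."""
    return T.rotation.dot(point) + T.translation
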
Example #4
    def T_obj_camera(self):
        """Returns the transformation from camera to object when the object is in the given stable pose.

        Returns
        -------
        :obj:`autolab_core.RigidTransform`
            The desired transform.
        """
        if self.stable_pose is None:
            T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
        else:
            T_obj_world = self.stable_pose.T_obj_table.as_frames(
                'obj', 'world')
        T_camera_obj = T_obj_world.inverse() * self.T_camera_world
        return T_camera_obj
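
# Frame bookkeeping check for the property above (identity poses, assumed
# frame names); inverse() swaps from_frame and to_frame before composition.
from autolab_core import RigidTransform

T_obj_world = RigidTransform(from_frame='obj', to_frame='world')
T_camera_world = RigidTransform(from_frame='camera', to_frame='world')
T_camera_obj = T_obj_world.inverse() * T_camera_world
assert T_camera_obj.from_frame == 'camera' and T_camera_obj.to_frame == 'obj'
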
Example #5
    def test_inverse(self):
        R_a_b = RigidTransform.random_rotation()
        t_a_b = RigidTransform.random_translation()
        T_a_b = RigidTransform(R_a_b, t_a_b, 'a', 'b')
        T_b_a = T_a_b.inverse()

        # multiple with numpy arrays
        M_a_b = np.r_[np.c_[R_a_b, t_a_b], [[0, 0, 0, 1]]]
        M_b_a = np.linalg.inv(M_a_b)

        self.assertTrue(np.sum(np.abs(T_b_a.matrix - M_b_a)) < 1e-5,
                        msg='Inverse gave incorrect transformation')

        # check frames
        self.assertEqual(T_b_a.from_frame,
                         'b',
                         msg='Inverse has incorrect input frame')
        self.assertEqual(T_b_a.to_frame,
                         'a',
                         msg='Inverse has incorrect output frame')
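
# The same inverse can be checked against the closed-form expression for a
# homogeneous transform, R_inv = R^T and t_inv = -R^T t (a sketch, not part
# of the original test):
import numpy as np
from autolab_core import RigidTransform

T_a_b = RigidTransform(RigidTransform.random_rotation(),
                       RigidTransform.random_translation(), 'a', 'b')
T_b_a = T_a_b.inverse()
assert np.allclose(T_b_a.rotation, T_a_b.rotation.T)
assert np.allclose(T_b_a.translation, -T_a_b.rotation.T.dot(T_a_b.translation))
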
Example #6
def execute_grasp(T_gripper_world, robot, arm, subscriber, config):
    """ Executes a single grasp for the hand pose T_gripper_world up to the point of lifting the object """
    # snap gripper to valid depth
    if T_gripper_world.translation[2] < config['grasping']['min_gripper_depth']:
        T_gripper_world.translation[2] = config['grasping'][
            'min_gripper_depth']

    # get cur pose
    T_cur_world = arm.get_pose()

    # compute approach pose
    t_approach_target = np.array([0, 0, config['grasping']['approach_dist']])
    T_gripper_approach = RigidTransform(translation=t_approach_target,
                                        from_frame='gripper',
                                        to_frame='gripper')
    T_approach_world = T_gripper_world * T_gripper_approach.inverse()
    t_lift_target = np.array([0, 0, config['grasping']['lift_height']])
    T_gripper_lift = RigidTransform(translation=t_lift_target,
                                    from_frame='gripper',
                                    to_frame='gripper')
    T_lift_world = T_gripper_world * T_gripper_lift.inverse()

    # compute delta to the approach pose
    t_delta_approach = T_approach_world.translation - T_cur_world.translation

    # perform grasp on the robot, up until the point of lifting
    for _ in range(10):
        _, torques = subscriber.left.get_torque()
    resting_torque = torques[3]
    arm.open_gripper(wait_for_res=True)
    robot.set_z(config['control']['approach_zoning'])
    arm.goto_pose(YMC.L_KINEMATIC_AVOIDANCE_POSE)
    arm.goto_pose(T_approach_world)

    # grasp
    robot.set_v(config['control']['approach_velocity'])
    robot.set_z(config['control']['standard_zoning'])
    if config['control']['test_collision']:
        robot.set_z('z200')
        T_gripper_world.translation[2] = 0.0
        arm.goto_pose(T_gripper_world, wait_for_res=True)
        _, T_cur_gripper_world = subscriber.left.get_pose()
        dist_from_goal = np.linalg.norm(T_cur_gripper_world.translation -
                                        T_gripper_world.translation)
        collision = False
        for i in range(10):
            _, torques = subscriber.left.get_torque()
        while dist_from_goal > 1e-3:
            _, T_cur_gripper_world = subscriber.left.get_pose()
            dist_from_goal = np.linalg.norm(T_cur_gripper_world.translation -
                                            T_gripper_world.translation)
            _, torques = subscriber.left.get_torque()
            print(torques)
            if torques[1] > 0.001:
                logging.info('Detected collision!!!!!!')
                robot.set_z('fine')
                arm.goto_pose(T_approach_world, wait_for_res=True)
                logging.info('Commanded!!!!!!')
                collision = True
                break
            arm.goto_pose(T_gripper_world, wait_for_res=False)
    else:
        arm.goto_pose(T_gripper_world, wait_for_res=True)

    # pick up object
    arm.close_gripper(force=config['control']['gripper_close_force'],
                      wait_for_res=True)
    pickup_gripper_width = arm.get_gripper_width()
    robot.set_v(config['control']['standard_velocity'])
    robot.set_z(config['control']['standard_zoning'])
    arm.goto_pose(T_lift_world)
    arm.goto_pose(YMC.L_KINEMATIC_AVOIDANCE_POSE, wait_for_res=True)
    arm.goto_pose(YMC.L_PREGRASP_POSE, wait_for_res=True)

    # shake test
    if config['control']['shake_test']:

        # compute shake poses
        radius = config['control']['shake_radius']
        angle = config['control']['shake_angle'] * np.pi
        delta_T = RigidTransform(translation=[0, 0, radius],
                                 from_frame='gripper',
                                 to_frame='gripper')
        R_shake = np.array([[1, 0, 0], [0, np.cos(angle), -np.sin(angle)],
                            [0, np.sin(angle), np.cos(angle)]])
        delta_T_up = RigidTransform(rotation=R_shake,
                                    translation=[0, 0, -radius],
                                    from_frame='gripper',
                                    to_frame='gripper')
        delta_T_down = RigidTransform(rotation=R_shake.T,
                                      translation=[0, 0, -radius],
                                      from_frame='gripper',
                                      to_frame='gripper')
        T_shake_up = YMC.L_PREGRASP_POSE.as_frames(
            'gripper', 'world') * delta_T_up * delta_T
        T_shake_down = YMC.L_PREGRASP_POSE.as_frames(
            'gripper', 'world') * delta_T_down * delta_T

        robot.set_v(config['control']['shake_velocity'])
        robot.set_z(config['control']['shake_zoning'])
        for i in range(config['control']['num_shakes']):
            arm.goto_pose(T_shake_up, wait_for_res=False)
            arm.goto_pose(YMC.L_PREGRASP_POSE, wait_for_res=False)
            arm.goto_pose(T_shake_down, wait_for_res=False)
            arm.goto_pose(YMC.L_PREGRASP_POSE, wait_for_res=False)
        robot.set_v(config['control']['standard_velocity'])
        robot.set_z(config['control']['standard_zoning'])

    # check gripper width
    for _ in range(10):
        _, torques = subscriber.left.get_torque()
        lift_torque = torques[3]
        lift_gripper_width = arm.get_gripper_width()

    # check drops
    lifted_object = False
    if np.abs(lift_gripper_width) > config['grasping']['pickup_min_width']:
        lifted_object = True

    # check lifts
    # table_clear = False
    # for i in range(3):
    #     _, depth_im, _ = sensor.frames()

    return lifted_object, lift_gripper_width, lift_torque
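
# The approach and lift poses above both come from composing the grasp pose
# with an offset expressed in the gripper frame; a standalone sketch of that
# pattern (numbers and frame names are placeholders):
import numpy as np
from autolab_core import RigidTransform

T_gripper_world = RigidTransform(rotation=np.eye(3),
                                 translation=np.array([0.4, 0.0, 0.1]),
                                 from_frame='gripper', to_frame='world')
approach_dist = 0.05
T_gripper_approach = RigidTransform(translation=np.array([0.0, 0.0, approach_dist]),
                                    from_frame='gripper', to_frame='gripper')
# inverting the offset backs the gripper off along its own z-axis
T_approach_world = T_gripper_world * T_gripper_approach.inverse()
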
Example #7
                                [ 0.00913315, -0.80663693, -0.59097669]])
            tra = np.array( [-0.14501467, -1.161184  ,  0.3797466 ])
            T_ar_cam = RigidTransform(rot, tra, 'ar_marker_2_0', 'base')
            T_ar_cam.from_frame = 'ar_marker_0'

        else:
            print('Getting transform from robot to AR tag...\n')
            T_world_ar = lookup_transform('ar_marker_0', 'base')
            print(T_world_ar)

            print('Getting transform from camera to AR tag...\n')
            T_ar_cam = lookup_transform('camera_depth_optical_frame', 'ar_marker_2_0')
            print(T_ar_cam)
            T_ar_cam.from_frame = 'ar_marker_0'

        T_ar_world = T_world_ar.inverse()
        T_cam_ar = T_ar_cam.inverse()
        ## Load and pre-process mesh ##
        filename = args.obj
        print('Loading mesh {}...\n'.format(filename))

        mesh = trimesh.load_mesh(filename)
        mesh.fix_normals()

        T_cam_obj = RigidTransform(T_cam_ar.rotation, mesh.centroid)
        T_obj_cam = T_cam_obj.inverse()

        ## Visualize the mesh ##
        print('Visualizing mesh (close visualizer window to continue)...\n')
        utils.visualize_scene(mesh, T_world_ar, T_ar_cam, T_cam_obj)
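
# lookup_transform is not defined in this snippet; a hypothetical tf2-based
# version with the same call pattern. The argument order and frame handling
# are assumptions and may differ from the project's real helper; assumes a
# ROS node is already initialized and a tf tree is being published.
import rospy
import tf2_ros
from tf.transformations import quaternion_matrix
from autolab_core import RigidTransform

def lookup_transform(to_frame, from_frame):
    tf_buffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tf_buffer)  # keeps the buffer filled
    msg = tf_buffer.lookup_transform(to_frame, from_frame, rospy.Time(0),
                                     rospy.Duration(5.0))
    q, t = msg.transform.rotation, msg.transform.translation
    rot = quaternion_matrix([q.x, q.y, q.z, q.w])[:3, :3]
    return RigidTransform(rot, [t.x, t.y, t.z], from_frame, to_frame)
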
Example #8
                            1022.3043212890625,
                            777.7421875,
                            height=1536,
                            width=2048)

    T_tag_camera = april.detect(sensor, intr, vis=cfg['vis_detect'])[0]
    T_tag_world = T_camera_world * T_tag_camera
    logging.info('Tag has translation {}'.format(T_tag_world.translation))
    import ipdb
    ipdb.set_trace()

    T_tag_tool = RigidTransform(rotation=np.eye(3),
                                translation=[0, 0, 0.04],
                                from_frame=T_tag_world.from_frame,
                                to_frame="franka_tool")
    T_tool_world = T_tag_world * T_tag_tool.inverse()
    fa.goto_pose_with_cartesian_control(
        T_tool_world, cartesian_impedances=[2000, 2000, 1000, 300, 300, 300])

    logging.info('Finding closest orthogonal grasp')
    T_grasp_world = get_closest_grasp_pose(T_tag_world, T_ready_world)
    T_lift = RigidTransform(translation=[0, 0, 0.2],
                            from_frame=T_ready_world.to_frame,
                            to_frame=T_ready_world.to_frame)
    T_lift_world = T_lift * T_grasp_world

    logging.info('Visualizing poses')
    _, depth_im, _ = sensor.frames()
    points_world = T_camera_world * intr.deproject(depth_im)

    if cfg['vis_detect']:
Example #9
def register_ensenso(config):
    # set parameters
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1

    # read parameters
    num_patterns = config["num_patterns"]
    max_tries = config["max_tries"]

    # load tf pattern to world
    T_pattern_world = RigidTransform.load(config["pattern_tf"])

    # initialize sensor
    sensor_frame = config["sensor_frame"]
    sensor_config = {"frame": sensor_frame}
    logging.info("Creating sensor")
    sensor = RgbdSensorFactory.sensor("ensenso", sensor_config)

    # initialize node
    rospy.init_node("register_ensenso", anonymous=True)
    rospy.wait_for_service("/%s/collect_pattern" % (sensor_frame))
    rospy.wait_for_service("/%s/estimate_pattern_pose" % (sensor_frame))

    # start sensor
    print("Starting sensor")
    sensor.start()
    time.sleep(1)
    print("Sensor initialized")

    # perform registration
    try:
        print("Collecting patterns")
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy(
                "/%s/collect_pattern" % (sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer, clear_buffer, decode,
                                   grid_spacing)
            if resp.success:
                print("Detected pattern %d" % (num_detected))
                num_detected += 1
            i += 1

        if i == max_tries:
            raise ValueError("Failed to detect calibration pattern!")

        print("Estimating pattern pose")
        estimate_pattern_pose = rospy.ServiceProxy(
            "/%s/estimate_pattern_pose" % (sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        q_pattern_camera = np.array([
            resp.pose.orientation.w,
            resp.pose.orientation.x,
            resp.pose.orientation.y,
            resp.pose.orientation.z,
        ])
        t_pattern_camera = np.array(
            [resp.pose.position.x, resp.pose.position.y, resp.pose.position.z])
        T_pattern_camera = RigidTransform(
            rotation=q_pattern_camera,
            translation=t_pattern_camera,
            from_frame="pattern",
            to_frame=sensor_frame,
        )
    except rospy.ServiceException as e:
        print("Service call failed: %s" % (str(e)))

    # compute final transformation
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()

    # save final transformation
    output_dir = os.path.join(config["calib_dir"], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, "%s_to_world.tf" % (sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, "%s.intr" % (sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print("Final Result for sensor %s" % (sensor_frame))
    print("Rotation: ")
    print(T_camera_world.rotation)
    print("Quaternion: ")
    print(T_camera_world.quaternion)
    print("Translation: ")
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config["use_robot"]:
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])
        t_gripper_world = np.array([
            target_pt_world.x + config["gripper_offset_x"],
            target_pt_world.y + config["gripper_offset_y"],
            target_pt_world.z + config["gripper_offset_z"],
        ])
        T_gripper_world = RigidTransform(
            rotation=R_gripper_world,
            translation=t_gripper_world,
            from_frame="gripper",
            to_frame="world",
        )

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        T_lift = RigidTransform(translation=(0, 0, 0.05),
                                from_frame="world",
                                to_frame="world")
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)

        # wait for human measurement
        keyboard_input("Take measurement. Hit [ENTER] when done")
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
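
# Round-trip sketch for the .tf pose files written and read above (the path
# here is just a placeholder):
from autolab_core import RigidTransform

T_pattern_world = RigidTransform(from_frame='pattern', to_frame='world')
T_pattern_world.save('/tmp/pattern_to_world.tf')
assert RigidTransform.load('/tmp/pattern_to_world.tf').from_frame == 'pattern'
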
Example #10
            xend = sx * (nx / 2 - ((nx + 1) % 2) / 2.0 + 1)
            ystart = -sy * (ny / 2 - ((ny + 1) % 2) / 2.0)
            yend = sy * (ny / 2 - ((ny + 1) % 2) / 2.0 + 1)
            filler = np.mgrid[ystart:yend:sy, xstart:xend:sx]
            filler = filler.reshape(
                (filler.shape[0], filler.shape[1] * filler.shape[2])).T
            objp[:, :2] = filler

            ret, rvec, tvec = cv2.solvePnP(objp, webcam_corner_px,
                                           intrinsics.K, None)
            mat, _ = cv2.Rodrigues(rvec)
            T_cb_cam = RigidTransform(mat,
                                      tvec,
                                      from_frame='cb',
                                      to_frame=sensor_frame)
            T_cam_cb = T_cb_cam.inverse()
            T_camera_world = T_cb_world.dot(T_cam_cb)

            logging.info('Final Result for sensor %s' % (sensor_frame))
            logging.info('Translation: ')
            logging.info(T_camera_world.translation)
            logging.info('Rotation: ')
            logging.info(T_camera_world.rotation)

        except Exception as e:
            logging.error('Failed to register sensor {}'.format(sensor_frame))
            traceback.print_exc()
            continue

        # save tranformation arrays based on setup
        output_dir = os.path.join(config['calib_dir'], sensor_frame)
Example #11
    from_frame='ycb_model',
    to_frame='source_model')

fmt = {}
for j in target_model_json['exported_objects']:
    if j['class'] == MODEL_NAME:
        fmt = j['fixed_model_transform']
        break
target_fixed_model_transform_mat = np.transpose(np.array(fmt))
target_fixed_model_transform = RigidTransform(
    rotation=target_fixed_model_transform_mat[:3, :3],
    translation=target_fixed_model_transform_mat[:3, 3],
    from_frame='ycb_model',
    to_frame='target_model')
model_transform = fixed_model_transform.dot(
    target_fixed_model_transform.inverse())

(translation,
 rotation) = object_pose_from_json(reference_source_json['objects'][0])
reference_source_pose = RigidTransform(rotation=rotation,
                                       translation=translation,
                                       from_frame='target_model',
                                       to_frame='camera')

(translation,
 rotation) = object_pose_from_json(reference_target_json['objects'][0])
reference_target_pose = RigidTransform(rotation=rotation,
                                       translation=translation,
                                       from_frame='target_model_fixed',
                                       to_frame='camera')
Example #12
class FrankaArm:

    def __init__(self, rosnode_name='franka_arm_client'):
        self._connected = False
        self._in_skill = False

        # set signal handler to handle ctrl+c and kill sigs
        signal.signal(signal.SIGINT, self._sigint_handler_gen())

        # init ROS
        rospy.init_node(rosnode_name, disable_signals=True)
        self._sub = FrankaArmSubscriber(new_ros_node=False)
        self._client = actionlib.SimpleActionClient('/execute_skill_action_server_node/execute_skill', ExecuteSkillAction)
        self._client.wait_for_server()
        self._current_goal_handle = None
        self._connected = True

        # set default identity tool delta pose
        self._tool_delta_pose = RigidTransform(from_frame='franka_tool', to_frame='franka_tool_base')

    def _sigint_handler_gen(self):
        def sigint_handler(sig, frame):
            if self._connected and self._in_skill:
                self._client.cancel_goal()
            sys.exit(0)

        return sigint_handler

    def _send_goal(self, goal, cb):
        '''
        Raises:
            FrankaArmCommException if a timeout is reached
        '''
        # TODO(jacky): institute a timeout check

        self._client.send_goal(goal, feedback_cb=cb)
        self._in_skill = True
        self._client.wait_for_result()
        self._in_skill = False
        return self._client.get_result()

    '''
    Controls
    '''

    def goto_pose(self, tool_pose, duration=3):
        '''Commands Arm to the given pose via linear interpolation

        Args:
            tool_pose (RigidTransform) : End-effector pose in tool frame
            duration (float) : How much time this robot motion should take

        Raises:
            FrankaArmCollisionException if a collision is detected
        '''
        if tool_pose.from_frame != 'franka_tool' or tool_pose.to_frame != 'world':
            raise ValueError('pose has invalid frame names! Make sure pose has from_frame=franka_tool and to_frame=world')

        tool_base_pose = tool_pose * self._tool_delta_pose.inverse()

        skill = ArmMoveToGoalWithDefaultSensorSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_feedback_controller_params(FC.DEFAULT_TORQUE_CONTROLLER_PARAMS)
        skill.add_termination_params(FC.DEFAULT_TERM_PARAMS) 

        skill.add_trajectory_params([duration] + tool_base_pose.matrix.T.flatten().tolist())
        goal = skill.create_goal()
        
        self._send_goal(goal, cb=lambda x: skill.feedback_callback(x))

    def goto_pose_delta(self, delta_tool_pose, duration=3):
        '''Commands Arm to the given delta pose via linear interpolation

        Args:
            delta_tool_pose (RigidTransform) : Delta pose in tool frame
            duration (float) : How much time this robot motion should take

        Raises:
            FrankaArmCollisionException if a collision is detected
        '''
        if delta_tool_pose.from_frame != 'franka_tool' or delta_tool_pose.to_frame != 'franka_tool':
            raise ValueError('delta_pose has invalid frame names! Make sure delta_pose has from_frame=franka_tool and to_frame=franka_tool')

        delta_tool_base_pose = self._tool_delta_pose * delta_tool_pose * self._tool_delta_pose.inverse()

        skill = ArmRelativeMotionWithDefaultSensorSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_feedback_controller_params(FC.DEFAULT_TORQUE_CONTROLLER_PARAMS) 
        skill.add_termination_params(FC.DEFAULT_TERM_PARAMS)

        skill.add_trajectory_params([duration] + delta_tool_base_pose.translation.tolist() + delta_tool_base_pose.quaternion.tolist())
        goal = skill.create_goal()
        
        self._send_goal(goal, cb=lambda x: skill.feedback_callback(x))
        
    def goto_joints(self, joints, duration=3):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles in radians
            duration (float) : How much time this robot motion should take       

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
            FrankaArmCollisionException if a collision is detected
        '''
        if not self.is_joints_reachable(joints):
            raise ValueError('Joints not reachable!')

        skill = JointPoseWithDefaultSensorSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_termination_params(FC.DEFAULT_TERM_PARAMS)

        skill.add_trajectory_params([duration] + np.array(joints).tolist())
        goal = skill.create_goal()
        
        self._send_goal(goal, cb=lambda x: skill.feedback_callback(x))
    
    def apply_joint_torques(self, torques, duration):
        '''Commands Arm to apply given joint torques for duration seconds

        Args:
            torques (list): A list of 7 numbers that correspond to torques in Nm
            duration (float): A float in the unit of seconds

        Raises:
            FrankaArmCollisionException if a collision is detected
        '''
        pass

    def apply_effector_forces_torques(self, duration, forces=None, torques=None):
        '''Applies the given end-effector forces and torques in N and Nm

        Args:
            duration (float): A float in the unit of seconds
            forces (list): Optional (defaults to None). 
                A list of 3 numbers that correspond to end-effector forces in 3 directions
            torques (list): Optional (defaults to None).
                A list of 3 numbers that correspond to end-effector torques in 3 axes

        Raises:
            FrankaArmCollisionException if a collision is detected
        '''
        forces = [0, 0, 0] if forces is None else np.array(forces).tolist()
        torques = [0, 0, 0] if torques is None else np.array(torques).tolist()

        if np.linalg.norm(forces) > FC.MAX_FORCE_NORM:
            raise ValueError('Force magnitude exceeds safety threshold of {}'.format(FC.MAX_FORCE_NORM))
        if np.linalg.norm(torques) > FC.MAX_TORQUE_NORM:
            raise ValueError('Torque magnitude exceeds safety threshold of {}'.format(FC.MAX_TORQUE_NORM))

        skill = ForceTorqueSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_termination_params([duration])

        skill.add_trajectory_params([0] + forces + torques)
        goal = skill.create_goal()
        
        self._send_goal(goal, cb=lambda x: skill.feedback_callback(x))

    def goto_gripper(self, width, speed=0.04, force=None):
        '''Commands gripper to goto a certain width, applying up to the given (default is max) force if needed

        Args:
            width (float): A float in the unit of meters
            speed (float): Gripper operation speed in meters per sec
            force (float): Max gripper force to apply in N. Default to None, which gives acceptable force

        Raises:
            ValueError: If width is less than 0 or greater than TODO(jacky) the maximum gripper opening
        '''
        skill = GripperWithDefaultSensorSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        # TODO(jacky): why is wait time needed?
        if force is not None:
            skill.add_trajectory_params([width, speed, force, FC.GRIPPER_WAIT_TIME])  # Gripper Width, Gripper Speed, Force, Wait Time
        else:
            skill.add_trajectory_params([width, speed, FC.GRIPPER_WAIT_TIME])  # Gripper Width, Gripper Speed, Wait Time
            
        goal = skill.create_goal()

        self._send_goal(goal, cb=lambda x: skill.feedback_callback(x))

    def open_gripper(self):
        '''Opens gripper to maximum width
        '''
        self.goto_gripper(FC.GRIPPER_WIDTH_MAX)

    def close_gripper(self, grasp=True):
        '''Closes the gripper as much as possible
        '''
        self.goto_gripper(FC.GRIPPER_WIDTH_MIN, force=FC.GRIPPER_MAX_FORCE if grasp else None)

    '''
    Reads
    '''
    
    def get_robot_state(self):
        '''
        Returns:
            dict of full robot state data
        '''
        return self._sub.get_data()

    def get_pose(self):
        '''
        Returns:
            pose (RigidTransform) of the current end-effector
        '''
        tool_base_pose = self._sub.get_pose()

        tool_pose = tool_base_pose * self._tool_delta_pose

        return tool_pose

    def get_joints(self):
        '''
        Returns:
            ndarray of shape (7,) of joint angles in radians
        '''
        return self._sub.get_joints()

    def get_joint_torques(self):
        '''
        Returns:
            ndarray of shape (7,) of joint torques in Nm
        '''
        return self._sub.get_joint_torques()

    def get_joint_velocities(self):
        '''
        Returns:
            ndarray of shape (7,) of joint velocities in rads/s
        '''
        return self._sub.get_joint_velocities()

    def get_gripper_width(self):
        '''
        Returns:
            float of gripper width in meters
        '''
        return self._sub.get_gripper_width()

    def get_gripper_is_grasped(self):
        '''
        Returns:
            True if gripper is grasping something. False otherwise
        '''
        return self._sub.get_gripper_is_grasped()

    def get_speed(self, speed):
        '''
        Returns:
            float of current target speed parameter
        '''
        pass

    def get_tool_base_pose(self):
        '''
        Returns:
            RigidTransform of current tool base pose
        '''
        return self._tool_delta_pose.copy()

    '''
    Sets
    '''
    
    def set_tool_delta_pose(self, tool_delta_pose):
        '''Sets the tool pose relative to the end-effector pose

        Args:
            tool_delta_pose (RigidTransform)
        '''
        if tool_delta_pose.from_frame != 'franka_tool' or tool_delta_pose.to_frame != 'franka_tool_base':
            raise ValueError('tool_delta_pose has invalid frame names! Make sure it has from_frame=franka_tool and to_frame=franka_tool_base')

        self._tool_delta_pose = tool_delta_pose.copy()

    def set_speed(self, speed):
        '''Sets current target speed parameter
        
        Args:
            speed (float)
        '''
        pass

    '''
    Misc
    '''
    def reset_joints(self):
        '''Commands Arm to goto hardcoded home joint configuration

        Raises:
            FrankaArmCollisionException if a collision is detected
        '''
        self.goto_joints(FC.HOME_JOINTS, duration=5)

    def is_joints_reachable(self, joints):
        '''
        Returns:
            True if all joints within joint limits
        '''
        for i, val in enumerate(joints):
            if val <= FC.JOINT_LIMITS_MIN[i] or val >= FC.JOINT_LIMITS_MAX[i]:
                return False

        return True
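
# Standalone sketch of the tool-frame bookkeeping used by goto_pose() and
# get_pose() above (identity poses; frame names as in the class):
from autolab_core import RigidTransform

T_tool_world = RigidTransform(from_frame='franka_tool', to_frame='world')
T_delta = RigidTransform(from_frame='franka_tool', to_frame='franka_tool_base')
T_tool_base_world = T_tool_world * T_delta.inverse()
assert T_tool_base_world.from_frame == 'franka_tool_base'
assert T_tool_base_world.to_frame == 'world'
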
def register_ensenso(config):
    # set parameters
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1

    # read parameters
    num_patterns = config['num_patterns']
    max_tries = config['max_tries']
    
    # load tf pattern to world
    T_pattern_world = RigidTransform.load(config['pattern_tf'])
    
    # initialize sensor
    sensor_frame = config['sensor_frame']
    sensor = EnsensoSensor(sensor_frame)

    # initialize node
    rospy.init_node('register_ensenso', anonymous=True)
    rospy.wait_for_service('/%s/collect_pattern' %(sensor_frame))
    rospy.wait_for_service('/%s/estimate_pattern_pose' %(sensor_frame))
    
    # start sensor
    print('Starting sensor')
    sensor.start()
    time.sleep(1)
    print('Sensor initialized')
    
    # perform registration
    try:
        print('Collecting patterns')
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy('/%s/collect_pattern' %(sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer,
                                   clear_buffer,
                                   decode,
                                   grid_spacing)
            if resp.success:
                print('Detected pattern %d' %(num_detected))
                num_detected += 1
            i += 1

        if i == max_tries:
            raise ValueError('Failed to detect calibration pattern!')
            
        print('Estimating pattern pose')
        estimate_pattern_pose = rospy.ServiceProxy('/%s/estimate_pattern_pose' %(sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        q_pattern_camera = np.array([resp.pose.orientation.w,
                                     resp.pose.orientation.x,
                                     resp.pose.orientation.y,
                                     resp.pose.orientation.z])
        t_pattern_camera = np.array([resp.pose.position.x,
                                     resp.pose.position.y,
                                     resp.pose.position.z])
        T_pattern_camera = RigidTransform(rotation=q_pattern_camera,
                                          translation=t_pattern_camera,
                                          from_frame='pattern',
                                          to_frame=sensor_frame)
    except rospy.ServiceException as e:
        print('Service call failed: %s' %(str(e)))

    # compute final transformation
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()
    
    # save final transformation
    output_dir = os.path.join(config['calib_dir'], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print('Final Result for sensor %s' %(sensor_frame))
    print('Rotation: ')
    print(T_camera_world.rotation)
    print('Quaternion: ')
    print(T_camera_world.quaternion)
    print('Translation: ')
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config['use_robot']:  
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        R_gripper_world = np.array([[1.0, 0, 0],
                                    [0, -1.0, 0],
                                    [0, 0, -1.0]])
        t_gripper_world = np.array([target_pt_world.x + config['gripper_offset_x'],
                                    target_pt_world.y + config['gripper_offset_y'],
                                    target_pt_world.z + config['gripper_offset_z']])
        T_gripper_world = RigidTransform(rotation=R_gripper_world,
                                         translation=t_gripper_world,
                                         from_frame='gripper',
                                         to_frame='world')

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        T_lift = RigidTransform(translation=(0,0,0.05), from_frame='world', to_frame='world')
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)
        
        # wait for human measurement
        yesno = raw_input('Take measurement. Hit [ENTER] when done')
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
Example #14
    def object_to_camera_poses(self):
        """Turn the params into a set of object to camera transformations.

        Returns
        -------
        :obj:`list` of :obj:`RigidTransform`
            A list of rigid transformations that transform from object space
            to camera space.
        """
        # compute increments in radial coordinates
        if self.max_radius == self.min_radius:
            radius_inc = 1
        elif self.num_radii == 1:
            radius_inc = self.max_radius - self.min_radius + 1
        else:
            radius_inc = (self.max_radius - self.min_radius) / (self.num_radii - 1)
        az_inc = (self.max_az - self.min_az) / self.num_az
        if self.max_elev == self.min_elev:
            elev_inc = 1
        elif self.num_elev == 1:
            elev_inc = self.max_elev - self.min_elev + 1
        else:
            elev_inc = (self.max_elev - self.min_elev) / (self.num_elev - 1)
        roll_inc = (self.max_roll - self.min_roll) / self.num_roll

        # create a pose for each set of spherical coords
        object_to_camera_poses = []
        radius = self.min_radius
        while radius <= self.max_radius:
            elev = self.min_elev
            while elev <= self.max_elev:
                az = self.min_az
                while az < self.max_az: #not inclusive due to topology (simplifies things)
                    roll = self.min_roll
                    while roll < self.max_roll:

                        # generate camera center from spherical coords
                        camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze()
                        camera_z_obj = -camera_center_obj / np.linalg.norm(camera_center_obj)

                        # find the canonical camera x and y axes
                        camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0])
                        if np.linalg.norm(camera_x_par_obj) == 0:
                            camera_x_par_obj = np.array([1, 0, 0])
                        camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj)
                        camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
                        camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
                        if camera_y_par_obj[2] > 0:
                            camera_x_par_obj = -camera_x_par_obj
                            camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
                            camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)

                        # rotate by the roll
                        R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj]
                        R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0],
                                                        [np.sin(roll), np.cos(roll), 0],
                                                        [0, 0, 1]])
                        R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera)
                        t_obj_camera = camera_center_obj

                        # create final transform
                        T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera,
                                                      from_frame='camera', to_frame='obj')
                        object_to_camera_poses.append(T_obj_camera.inverse())
                        roll += roll_inc
                    az += az_inc
                elev += elev_inc
            radius += radius_inc
        return object_to_camera_poses
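
# Quick sanity check on the rotation construction above: stacking orthonormal
# camera axes as columns should give a proper rotation (sketch with fixed axes):
import numpy as np

camera_z = np.array([0.0, 0.0, -1.0])
camera_x = np.array([1.0, 0.0, 0.0])
camera_y = np.cross(camera_z, camera_x)
R = np.c_[camera_x, camera_y, camera_z]
assert np.allclose(R.T.dot(R), np.eye(3))
assert np.isclose(np.linalg.det(R), 1.0)
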
    def get_camera_chessboard(sensor_frame, sensor_type, device_num, config):
        # load cfg
        num_transform_avg = config['num_transform_avg']
        num_images = config['num_images']
        sx = config['corners_x']
        sy = config['corners_y']
        color_image_upsample_rate = config['color_image_upsample_rate']
        vis = config['vis']

        # open sensor
        if sensor_type.lower() in CameraChessboardRegister._SENSOR_TYPES:
            sensor = Kinect2Sensor(
                device_num=device_num,
                frame=sensor_frame,
                packet_pipeline_mode=Kinect2PacketPipelineMode.CPU)
        else:
            logging.warning(
                'Could not register device at %s. Sensor type %s not supported'
                % (sensor_frame, sensor_type))
            return None
        logging.info('Registering camera %s' % (sensor_frame))
        sensor.start()

        # repeat registration multiple times and average results
        R = np.zeros([3, 3])
        t = np.zeros([3, 1])
        points_3d_plane = PointCloud(np.zeros([3, sx * sy]),
                                     frame=sensor.ir_frame)

        k = 0
        while k < num_transform_avg:
            # average a bunch of depth images together
            depth_ims = np.zeros([
                Kinect2Sensor.DEPTH_IM_HEIGHT, Kinect2Sensor.DEPTH_IM_WIDTH,
                num_images
            ])
            for i in range(num_images):
                small_color_im, new_depth_im, _ = sensor.frames()
                depth_ims[:, :, i] = new_depth_im.data

            med_depth_im = np.median(depth_ims, axis=2)
            depth_im = DepthImage(med_depth_im, sensor.ir_frame)

            # find the corner pixels in an upsampled version of the color image
            big_color_im = small_color_im.resize(color_image_upsample_rate)
            corner_px = CameraChessboardRegister._find_chessboard(big_color_im,
                                                                  sx=sx,
                                                                  sy=sy,
                                                                  vis=vis)

            if corner_px is None:
                logging.error('No chessboard detected')
                continue

            # convert back to original image
            small_corner_px = corner_px / color_image_upsample_rate

            if vis:
                plt.figure()
                plt.imshow(small_color_im.data)
                for i in range(sx):
                    plt.scatter(small_corner_px[i, 0],
                                small_corner_px[i, 1],
                                s=25,
                                c='b')
                plt.axis('off')
                plt.show()

            # project points into 3D
            camera_intr = sensor.ir_intrinsics
            points_3d = camera_intr.deproject(depth_im)

            # get round chessboard ind
            corner_px_round = np.round(small_corner_px).astype(np.uint16)
            corner_ind = depth_im.ij_to_linear(corner_px_round[:, 0],
                                               corner_px_round[:, 1])
            if corner_ind.shape[0] != sx * sy:
                print('Did not find all corners. Discarding...')
                continue

            # average 3d points
            points_3d_plane = (k * points_3d_plane +
                               points_3d[corner_ind]) / (k + 1)
            logging.info('Registration iteration %d of %d' %
                         (k + 1, config['num_transform_avg']))
            k += 1

        # fit a plane to the chessboard corners
        X = np.c_[points_3d_plane.x_coords, points_3d_plane.y_coords,
                  np.ones(points_3d_plane.num_points)]
        y = points_3d_plane.z_coords
        A = X.T.dot(X)
        b = X.T.dot(y)
        w = np.linalg.inv(A).dot(b)
        n = np.array([w[0], w[1], -1])
        n = n / np.linalg.norm(n)
        mean_point_plane = points_3d_plane.mean()

        # find x-axis of the chessboard coordinates on the fitted plane
        T_camera_table = RigidTransform(
            translation=-points_3d_plane.mean().data,
            from_frame=points_3d_plane.frame,
            to_frame='table')
        points_3d_centered = T_camera_table * points_3d_plane

        # get points along y
        coord_pos_y = int(math.floor(sx * (sy - 1) / 2.0))
        coord_neg_y = int(math.ceil(sx * (sy + 1) / 2.0))
        points_pos_y = points_3d_centered[:coord_pos_y]
        points_neg_y = points_3d_centered[coord_neg_y:]
        y_axis = np.mean(points_pos_y.data, axis=1) - np.mean(
            points_neg_y.data, axis=1)
        y_axis = y_axis - np.vdot(y_axis, n) * n
        y_axis = y_axis / np.linalg.norm(y_axis)
        x_axis = np.cross(-n, y_axis)

        # WARNING! May need symmetry breaking but it appears the points are ordered consistently

        # produce translation and rotation from plane center and chessboard basis
        rotation_cb_camera = rotation_from_axes(x_axis, y_axis, n)
        translation_cb_camera = mean_point_plane.data
        T_cb_camera = RigidTransform(rotation=rotation_cb_camera,
                                     translation=translation_cb_camera,
                                     from_frame='cb',
                                     to_frame=sensor.frame)

        T_camera_cb = T_cb_camera.inverse()
        cb_points_cam = points_3d[corner_ind]

        # optionally display cb corners with detected pose in 3d space
        if config['debug']:
            # display image with axes overlayed
            cb_center_im = camera_intr.project(
                Point(T_cb_camera.translation, frame=sensor.ir_frame))
            cb_x_im = camera_intr.project(
                Point(T_cb_camera.translation +
                      T_cb_camera.x_axis * config['scale_amt'],
                      frame=sensor.ir_frame))
            cb_y_im = camera_intr.project(
                Point(T_cb_camera.translation +
                      T_cb_camera.y_axis * config['scale_amt'],
                      frame=sensor.ir_frame))
            cb_z_im = camera_intr.project(
                Point(T_cb_camera.translation +
                      T_cb_camera.z_axis * config['scale_amt'],
                      frame=sensor.ir_frame))
            x_line = np.array([cb_center_im.data, cb_x_im.data])
            y_line = np.array([cb_center_im.data, cb_y_im.data])
            z_line = np.array([cb_center_im.data, cb_z_im.data])

            plt.figure()
            plt.imshow(small_color_im.data)
            plt.scatter(cb_center_im.data[0], cb_center_im.data[1])
            plt.plot(x_line[:, 0], x_line[:, 1], c='r', linewidth=3)
            plt.plot(y_line[:, 0], y_line[:, 1], c='g', linewidth=3)
            plt.plot(z_line[:, 0], z_line[:, 1], c='b', linewidth=3)
            plt.axis('off')
            plt.title('Chessboard frame in camera %s' % (sensor.frame))
            plt.show()

        return T_camera_cb, cb_points_cam, points_3d_plane
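
# The plane fit above solves the normal equations explicitly; an equivalent
# formulation with np.linalg.lstsq, shown on synthetic corner points:
import numpy as np

pts = np.random.rand(3, 48)  # 3 x N stand-in for the chessboard corners
X = np.c_[pts[0], pts[1], np.ones(pts.shape[1])]
w, _, _, _ = np.linalg.lstsq(X, pts[2], rcond=None)
n = np.array([w[0], w[1], -1.0])
n = n / np.linalg.norm(n)
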
Example #16
    def object_to_camera_poses(self, camera_intr):
        """Turn the params into a set of object to camera transformations.

        Returns
        -------
        :obj:`list` of :obj:`RigidTransform`
            A list of rigid transformations that transform from object space
            to camera space.
        :obj:`list` of :obj:`RigidTransform`
            A list of rigid transformations that transform from object space
            to camera space without the translation in the plane
        :obj:`list` of :obj:`CameraIntrinsics`
            A list of camera intrinsics that project the translated object
            into the center pixel of the camera, simulating cropping
        """
        # compute increments in radial coordinates
        if self.max_radius == self.min_radius:
            radius_inc = 1
        elif self.num_radii == 1:
            radius_inc = self.max_radius - self.min_radius + 1
        else:
            radius_inc = (self.max_radius - self.min_radius) / (self.num_radii - 1)
        az_inc = (self.max_az - self.min_az) / self.num_az
        if self.max_elev == self.min_elev:
            elev_inc = 1
        elif self.num_elev == 1:
            elev_inc = self.max_elev - self.min_elev + 1
        else:
            elev_inc = (self.max_elev - self.min_elev) / (self.num_elev - 1)
        roll_inc = (self.max_roll - self.min_roll) / self.num_roll

        if self.max_x == self.min_x:
            x_inc = 1
        elif self.num_x == 1:
            x_inc = self.max_x - self.min_x + 1
        else:
            x_inc = (self.max_x - self.min_x) / (self.num_x - 1)


        if self.max_y == self.min_y:
            y_inc = 1
        elif self.num_y == 1:
            y_inc = self.max_y - self.min_y + 1
        else:
            y_inc = (self.max_y - self.min_y) / (self.num_y - 1)

        # create a pose for each set of spherical coords
        object_to_camera_poses = []
        object_to_camera_normalized_poses = []
        camera_shifted_intrinsics = []
        radius = self.min_radius
        while radius <= self.max_radius:
            elev = self.min_elev
            while elev <= self.max_elev:
                az = self.min_az
                while az < self.max_az: #not inclusive due to topology (simplifies things)
                    roll = self.min_roll
                    while roll < self.max_roll:
                        x = self.min_x
                        while x <= self.max_x:
                            y = self.min_y
                            while y <= self.max_y:
                                num_poses = len(object_to_camera_poses)

                                # generate camera center from spherical coords
                                delta_t = np.array([x, y, 0])
                                camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze() + delta_t
                                camera_z_obj = -np.array([sph2cart(radius, az, elev)]).squeeze()
                                camera_z_obj = camera_z_obj / np.linalg.norm(camera_z_obj)

                                # find the canonical camera x and y axes
                                camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0])
                                if np.linalg.norm(camera_x_par_obj) == 0:
                                    camera_x_par_obj = np.array([1, 0, 0])
                                camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj)
                                camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
                                camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
                                if camera_y_par_obj[2] > 0:
                                    print('Flipping', num_poses)
                                    camera_x_par_obj = -camera_x_par_obj
                                    camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
                                    camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)

                                # rotate by the roll
                                R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj]
                                R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0],
                                                                [np.sin(roll), np.cos(roll), 0],
                                                                [0, 0, 1]])
                                R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera)
                                t_obj_camera = camera_center_obj

                                # create final transform
                                T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera,
                                                              from_frame='camera',
                                                              to_frame='obj')
                            
                                object_to_camera_poses.append(T_obj_camera.inverse())

                                # compute pose without the center offset because we can easily add in the object offset later
                                t_obj_camera_normalized = camera_center_obj - delta_t
                                T_obj_camera_normalized = RigidTransform(R_obj_camera, t_obj_camera_normalized,
                                                                         from_frame='camera',
                                                                         to_frame='obj')
                                object_to_camera_normalized_poses.append(T_obj_camera_normalized.inverse())

                                # compute new camera center by projecting object 0,0,0 into the camera
                                center_obj_obj = Point(np.zeros(3), frame='obj')
                                center_obj_camera = T_obj_camera.inverse() * center_obj_obj
                                u_center_obj = camera_intr.project(center_obj_camera)
                                camera_shifted_intr = copy.deepcopy(camera_intr)
                                camera_shifted_intr.cx = 2 * camera_intr.cx - float(u_center_obj.x)
                                camera_shifted_intr.cy = 2 * camera_intr.cy - float(u_center_obj.y)
                                camera_shifted_intrinsics.append(camera_shifted_intr)

                                y += y_inc
                            x += x_inc
                        roll += roll_inc
                    az += az_inc
                elev += elev_inc
            radius += radius_inc
        return object_to_camera_poses, object_to_camera_normalized_poses, camera_shifted_intrinsics
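
# The intrinsics shift above (cx' = 2*cx - u) moves the principal point so
# that the object origin re-projects at the original principal point,
# simulating a crop centered on the object; the same arithmetic in plain
# pinhole terms (numbers are placeholders):
import numpy as np

fx, fy = 525.0, 525.0
cx, cy = 320.0, 240.0
p_cam = np.array([0.05, -0.02, 0.7])  # object origin in the camera frame
u = fx * p_cam[0] / p_cam[2] + cx
v = fy * p_cam[1] / p_cam[2] + cy
cx_shifted, cy_shifted = 2 * cx - u, 2 * cy - v
# with the shifted intrinsics the object origin projects back to (cx, cy)
assert np.isclose(fx * p_cam[0] / p_cam[2] + cx_shifted, cx)
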
Example #17
    def T_obj_world(self):
        T_world_obj = RigidTransform(rotation=self.r.T,
                                     translation=self.x0,
                                     from_frame='world',
                                     to_frame='obj')
        return T_world_obj.inverse()
Example #18
    def register(sensor, config):
        """
        Registers a camera to a chessboard.

        Parameters
        ----------
        sensor : :obj:`perception.RgbdSensor`
            the sensor to register
        config : :obj:`autolab_core.YamlConfig` or :obj:`dict`
            configuration file for registration

        Returns
        -------
        :obj:`ChessboardRegistrationResult`
            the result of registration

        Notes
        -----
        The config must have the parameters specified in the Other Parameters section.

        Other Parameters
        ----------------
        num_transform_avg : int
            the number of independent registrations to average together
        num_images : int
            the number of images to read for each independent registration
        corners_x : int
            the number of chessboard corners in the x-direction
        corners_y : int
            the number of chessboard corners in the y-direction
        color_image_rescale_factor : float
            amount to rescale the color image for detection (numbers around 4-8 are useful)
        vis : bool
            whether or not to visualize the registration
        """
        # read config
        num_transform_avg = config['num_transform_avg']
        num_images = config['num_images']
        sx = config['corners_x']
        sy = config['corners_y']
        point_order = config['point_order']
        color_image_rescale_factor = config['color_image_rescale_factor']
        flip_normal = config['flip_normal']
        y_points_left = False
        if 'y_points_left' in config.keys() and sx == sy:
            y_points_left = config['y_points_left']
            num_images = 1
        vis = config['vis']

        # read params from sensor
        logging.info('Registering camera %s' %(sensor.frame))
        ir_intrinsics = sensor.ir_intrinsics

        # repeat registration multiple times and average results
        R = np.zeros([3,3])
        t = np.zeros([3,1])
        points_3d_plane = PointCloud(np.zeros([3, sx*sy]), frame=sensor.ir_frame)

        k = 0
        while k < num_transform_avg:
            # average a bunch of depth images together
            depth_ims = None
            for i in range(num_images):
                start = time.time()
                small_color_im, new_depth_im, _ = sensor.frames()
                end = time.time()
                logging.info('Frames Runtime: %.3f' %(end-start))
                if depth_ims is None:
                    depth_ims = np.zeros([new_depth_im.height,
                                          new_depth_im.width,
                                          num_images])
                depth_ims[:,:,i] = new_depth_im.data

            med_depth_im = np.median(depth_ims, axis=2)
            depth_im = DepthImage(med_depth_im, sensor.ir_frame)

            # find the corner pixels in an upsampled version of the color image
            big_color_im = small_color_im.resize(color_image_rescale_factor)
            corner_px = big_color_im.find_chessboard(sx=sx, sy=sy)

            if corner_px is None:
                logging.error('No chessboard detected! Check camera exposure settings')
                continue

            if vis:
                plt.figure()
                plt.imshow(big_color_im.data)
                for i in range(sx):
                    plt.scatter(corner_px[i,0], corner_px[i,1], s=25, c='b')
                plt.show()

            # convert back to original image
            small_corner_px = corner_px / color_image_rescale_factor
        
            if vis:
                plt.figure()
                plt.imshow(small_color_im.data)
                for i in range(sx):
                    plt.scatter(small_corner_px[i,0], small_corner_px[i,1], s=25, c='b')
                plt.axis('off')
                plt.show()

            # project points into 3D
            camera_intr = sensor.ir_intrinsics
            points_3d = camera_intr.deproject(depth_im)

            # get linear indices of the rounded chessboard corner pixels
            corner_px_round = np.round(small_corner_px).astype(np.uint16)
            corner_ind = depth_im.ij_to_linear(corner_px_round[:,0], corner_px_round[:,1])
            if corner_ind.shape[0] != sx*sy:
                logging.warning('Did not find all corners. Discarding...')
                continue

            # average 3d points
            points_3d_plane = (k * points_3d_plane + points_3d[corner_ind]) / (k + 1)
            logging.info('Registration iteration %d of %d' %(k+1, config['num_transform_avg']))
            k += 1

        # fit a plane to the chessboard corners
        X = np.c_[points_3d_plane.x_coords, points_3d_plane.y_coords, np.ones(points_3d_plane.num_points)]
        y = points_3d_plane.z_coords
        A = X.T.dot(X)
        b = X.T.dot(y)
        w = np.linalg.inv(A).dot(b)
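        # least-squares plane fit z = w[0]*x + w[1]*y + w[2]; in implicit form
        # w[0]*x + w[1]*y - z + w[2] = 0, so (w[0], w[1], -1) is a (non-unit) normal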
        n = np.array([w[0], w[1], -1])
        n = n / np.linalg.norm(n)
        if flip_normal:
            n = -n
        mean_point_plane = points_3d_plane.mean()
        
        # find x-axis of the chessboard coordinates on the fitted plane
        T_camera_table = RigidTransform(translation = -points_3d_plane.mean().data,
                                    from_frame=points_3d_plane.frame,
                                    to_frame='table')
        points_3d_centered = T_camera_table * points_3d_plane

        # estimate the in-plane chessboard axes from the ordered corner points
        if point_order == 'row_major':
            coord_pos_x = int(math.floor(sx*sy/2.0))
            coord_neg_x = int(math.ceil(sx*sy/2.0))
            
            points_pos_x = points_3d_centered[coord_pos_x:]
            points_neg_x = points_3d_centered[:coord_neg_x]
            x_axis = np.mean(points_pos_x.data, axis=1) - np.mean(points_neg_x.data, axis=1)
            x_axis = x_axis - np.vdot(x_axis, n)*n
            x_axis = x_axis / np.linalg.norm(x_axis)
            y_axis = np.cross(n, x_axis)
        else:
            coord_pos_y = int(math.floor(sx*(sy-1)/2.0))
            coord_neg_y = int(math.ceil(sx*(sy+1)/2.0))
            points_pos_y = points_3d_centered[:coord_pos_y]
            points_neg_y = points_3d_centered[coord_neg_y:]
            y_axis = np.mean(points_pos_y.data, axis=1) - np.mean(points_neg_y.data, axis=1)
            y_axis = y_axis - np.vdot(y_axis, n)*n
            y_axis = y_axis / np.linalg.norm(y_axis)
            x_axis = np.cross(-n, y_axis)

        # produce translation and rotation from plane center and chessboard basis
        rotation_cb_camera = RigidTransform.rotation_from_axes(x_axis, y_axis, n)
        translation_cb_camera = mean_point_plane.data
        T_cb_camera = RigidTransform(rotation=rotation_cb_camera,
                                     translation=translation_cb_camera,
                                     from_frame='cb',
                                     to_frame=sensor.frame)
        
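        # if y_points_left is set (square boards only, see above), rotate the
        # chessboard frame 90 degrees about its z-axis so the y-axis points in
        # the intended direction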
        if y_points_left and np.abs(T_cb_camera.y_axis[1]) > 0.1:
            if T_cb_camera.x_axis[0] > 0:
                T_cb_camera.rotation = T_cb_camera.rotation.dot(RigidTransform.z_axis_rotation(-np.pi/2).T)
            else:
                T_cb_camera.rotation = T_cb_camera.rotation.dot(RigidTransform.z_axis_rotation(np.pi/2).T)
        T_camera_cb = T_cb_camera.inverse()
                
        # optionally display cb corners with detected pose in 3d space
        if config['debug']:
            # display image with axes overlayed
            cb_center_im = camera_intr.project(Point(T_cb_camera.translation, frame=sensor.ir_frame))
            cb_x_im = camera_intr.project(Point(T_cb_camera.translation + T_cb_camera.x_axis * config['scale_amt'], frame=sensor.ir_frame))
            cb_y_im = camera_intr.project(Point(T_cb_camera.translation + T_cb_camera.y_axis * config['scale_amt'], frame=sensor.ir_frame))
            cb_z_im = camera_intr.project(Point(T_cb_camera.translation + T_cb_camera.z_axis * config['scale_amt'], frame=sensor.ir_frame))
            x_line = np.array([cb_center_im.data, cb_x_im.data])
            y_line = np.array([cb_center_im.data, cb_y_im.data])
            z_line = np.array([cb_center_im.data, cb_z_im.data])

            plt.figure()
            plt.imshow(small_color_im.data)
            plt.scatter(cb_center_im.data[0], cb_center_im.data[1])
            plt.plot(x_line[:,0], x_line[:,1], c='r', linewidth=3)
            plt.plot(y_line[:,0], y_line[:,1], c='g', linewidth=3)
            plt.plot(z_line[:,0], z_line[:,1], c='b', linewidth=3)
            plt.axis('off')
            plt.title('Chessboard frame in camera %s' %(sensor.frame))
            plt.show()

        return ChessboardRegistrationResult(T_camera_cb, points_3d_plane)
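A hedged sketch of a configuration dictionary covering the parameters this register() implementation reads (values are illustrative; the owning class and the sensor construction are assumptions that depend on the surrounding codebase):

registration_config = {
    'num_transform_avg': 5,             # independent registrations to average
    'num_images': 10,                   # depth frames to median-filter per registration
    'corners_x': 6,                     # inner chessboard corners along x
    'corners_y': 9,                     # inner chessboard corners along y
    'point_order': 'row_major',
    'color_image_rescale_factor': 4.0,  # upsample color image for corner detection
    'flip_normal': False,
    'vis': False,
    'debug': False,
    'scale_amt': 0.1,                   # axis length (m) drawn in the debug overlay
}
# result = SomeRegistrationClass.register(sensor, registration_config)  # class name is a placeholder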
Example #19
0
class FrankaArm:
    def __init__(self,
                 rosnode_name='franka_arm_client',
                 ros_log_level=rospy.INFO,
                 robot_num=1):

        self._execute_skill_action_server_name = \
                '/execute_skill_action_server_node_'+str(robot_num)+'/execute_skill'
        self._robot_state_server_name = \
                '/get_current_robot_state_server_node_'+str(robot_num)+'/get_current_robot_state_server'
        self._robolib_status_server_name = \
                '/get_current_robolib_status_server_node_'+str(robot_num)+'/get_current_robolib_status_server'

        self._connected = False
        self._in_skill = False

        # set signal handler to handle ctrl+c and kill sigs
        signal.signal(signal.SIGINT, self._sigint_handler_gen())

        # init ROS
        rospy.init_node(rosnode_name,
                        disable_signals=True,
                        log_level=ros_log_level)

        rospy.wait_for_service(self._robolib_status_server_name)
        self._get_current_robolib_status = rospy.ServiceProxy(
            self._robolib_status_server_name, GetCurrentRobolibStatusCmd)

        self._state_client = FrankaArmStateClient(
            new_ros_node=False,
            robot_state_server_name=self._robot_state_server_name)
        self._client = actionlib.SimpleActionClient(
            self._execute_skill_action_server_name, ExecuteSkillAction)
        self._client.wait_for_server()
        self.wait_for_robolib()

        # done init ROS
        self._connected = True

        # set default identity tool delta pose
        self._tool_delta_pose = RigidTransform(from_frame='franka_tool',
                                               to_frame='franka_tool_base')

    def wait_for_robolib(self, timeout=None):
        '''Blocks execution until robolib gives ready signal.
        '''
        timeout = FC.DEFAULT_ROBOLIB_TIMEOUT if timeout is None else timeout
        t_start = time()
        while time() - t_start < timeout:
            robolib_status = self._get_current_robolib_status().robolib_status
            if robolib_status.is_ready:
                return
            sleep(1e-2)
        raise FrankaArmCommException('Robolib status not ready for {}s'.format(
            timeout))

    def _sigint_handler_gen(self):
        def sigint_handler(sig, frame):
            if self._connected and self._in_skill:
                self._client.cancel_goal()
            sys.exit(0)

        return sigint_handler

    def _send_goal(self, goal, cb, ignore_errors=True):
        '''
        Raises:
            FrankaArmCommException if a timeout is reached
            FrankaArmException if robolib gives an error
            FrankaArmRobolibNotReadyException if robolib is not ready
        '''
        self._in_skill = True
        self._client.send_goal(goal, feedback_cb=cb)

        done = False
        while not done:
            robolib_status = self._get_current_robolib_status().robolib_status

            e = None
            if rospy.is_shutdown():
                e = RuntimeError('rospy is down!')
            elif robolib_status.error_description:
                e = FrankaArmException(robolib_status.error_description)
            elif not robolib_status.is_ready:
                e = FrankaArmRobolibNotReadyException()

            if e is not None:
                if ignore_errors:
                    self.wait_for_robolib()
                    break
                else:
                    raise e

            done = self._client.wait_for_result(
                rospy.Duration.from_sec(FC.ACTION_WAIT_LOOP_TIME))

        self._in_skill = False
        return self._client.get_result()

    '''
    Controls
    '''

    def goto_pose(self,
                  tool_pose,
                  duration=3,
                  stop_on_contact_forces=None,
                  cartesian_impedances=None,
                  ignore_errors=True,
                  skill_desc='',
                  skill_type=SkillType.ImpedanceControlSkill):
        '''Commands Arm to the given pose via linear interpolation

        Args:
            tool_pose (RigidTransform) : End-effector pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
            ignore_errors : function ignores errors by default. If False, errors
                and some exceptions can be thrown
            skill_desc (string) : Skill description to use for logging on
                control-pc.
        '''
        return self._goto_pose(tool_pose, duration, stop_on_contact_forces,
                               cartesian_impedances, ignore_errors, skill_desc,
                               skill_type)

    def goto_pose_with_cartesian_control(self,
                                         tool_pose,
                                         duration=3.,
                                         stop_on_contact_forces=None,
                                         cartesian_impedances=None,
                                         ignore_errors=True,
                                         skill_desc=''):
        '''Commands Arm to the given pose via min-jerk interpolation.

        Use Franka's internal Cartesian Controller to execute this skill.

        Args:
            tool_pose (RigidTransform) : End-effector pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
            ignore_errors : function ignores errors by default. If False, errors
                and some exceptions can be thrown
            skill_desc (string) : Skill description to use for logging on
                control-pc.
        '''
        return self._goto_pose(tool_pose,
                               duration,
                               stop_on_contact_forces,
                               cartesian_impedances,
                               ignore_errors,
                               skill_desc,
                               skill_type=SkillType.CartesianPoseSkill)

    def goto_pose_with_impedance_control(self,
                                         tool_pose,
                                         duration=3.,
                                         stop_on_contact_forces=None,
                                         cartesian_impedances=None,
                                         ignore_errors=True,
                                         skill_desc=''):
        '''Commands Arm to the given pose via min-jerk interpolation.

        Use our own impedance controller (use jacobian to convert cartesian
            poses to joints.)

        Args:
            tool_pose (RigidTransform) : End-effector pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
            ignore_errors : function ignores errors by default. If False, errors
                and some exceptions can be thrown
            skill_desc (string) : Skill description to use for logging on
                control-pc.
        '''
        return self._goto_pose(tool_pose,
                               duration,
                               stop_on_contact_forces,
                               cartesian_impedances,
                               ignore_errors,
                               skill_desc,
                               skill_type=SkillType.ImpedanceControlSkill)

    def _goto_pose(self,
                   tool_pose,
                   duration=3,
                   stop_on_contact_forces=None,
                   cartesian_impedances=None,
                   ignore_errors=True,
                   skill_desc='',
                   skill_type=None):
        '''Commands Arm to the given pose via linear interpolation

        Args:
            tool_pose (RigidTransform) : End-effector pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
        '''
        if tool_pose.from_frame != 'franka_tool' or tool_pose.to_frame != 'world':
            raise ValueError('pose has invalid frame names! Make sure pose has '
                             'from_frame=franka_tool and to_frame=world')

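        # strip off the tool offset: composing the franka_tool->world pose with the
        # inverse tool delta gives the commanded franka_tool_base->world pose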
        tool_base_pose = tool_pose * self._tool_delta_pose.inverse()

        skill = GoToPoseSkill(skill_desc, skill_type)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if cartesian_impedances is not None:
            skill.add_cartesian_impedances(cartesian_impedances)
        else:
            skill.add_feedback_controller_params(
                FC.DEFAULT_TORQUE_CONTROLLER_PARAMS)

        if stop_on_contact_forces is not None:
            skill.add_contact_termination_params(FC.DEFAULT_TERM_BUFFER_TIME,
                                                 stop_on_contact_forces,
                                                 stop_on_contact_forces)
        else:
            skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        skill.add_goal_pose_with_matrix(
            duration,
            tool_base_pose.matrix.T.flatten().tolist())
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def goto_pose_delta(self,
                        delta_tool_pose,
                        duration=3,
                        stop_on_contact_forces=None,
                        cartesian_impedances=None,
                        ignore_errors=True,
                        skill_desc='',
                        skill_type=SkillType.ImpedanceControlSkill):
        '''Commands Arm to the given delta pose via linear interpolation and
        uses impedance control.

        Args:
            delta_tool_pose (RigidTransform) : Delta pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
            ignore_errors : function ignores errors by default. If False, errors
                and some exceptions can be thrown
            skill_desc (string) : Skill description to use for logging on
                control-pc.
        '''
        return self._goto_pose_delta(delta_tool_pose, duration,
                                     stop_on_contact_forces,
                                     cartesian_impedances, ignore_errors,
                                     skill_desc, skill_type)

    def goto_pose_delta_with_impedance_control(self,
                                               delta_tool_pose,
                                               duration=3,
                                               stop_on_contact_forces=None,
                                               cartesian_impedances=None,
                                               ignore_errors=True,
                                               skill_desc=''):
        return self._goto_pose_delta(
            delta_tool_pose,
            duration,
            stop_on_contact_forces,
            cartesian_impedances,
            ignore_errors,
            skill_desc,
            skill_type=SkillType.ImpedanceControlSkill)

    def goto_pose_delta_with_cartesian_control(self,
                                               delta_tool_pose,
                                               duration=3,
                                               stop_on_contact_forces=None,
                                               cartesian_impedances=None,
                                               ignore_errors=True,
                                               skill_desc=''):
        '''Commands Arm to the given delta pose via linear interpolation using
        franka's internal cartesian control.

        Args:
            delta_tool_pose (RigidTransform) : Delta pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
            cartesian_impedances (list): List of 6 floats. Used to set the cartesian
                impedance of Franka's internal cartesian controller.
                List of (x, y, z, roll, pitch, yaw)
            ignore_errors : function ignores errors by default. If False, errors
                and some exceptions can be thrown
            skill_desc (string) : Skill description to use for logging on
                control-pc.
        '''
        return self._goto_pose_delta(delta_tool_pose,
                                     duration,
                                     stop_on_contact_forces,
                                     cartesian_impedances,
                                     ignore_errors,
                                     skill_desc,
                                     skill_type=SkillType.CartesianPoseSkill)

    def _goto_pose_delta(self,
                         delta_tool_pose,
                         duration=3,
                         stop_on_contact_forces=None,
                         cartesian_impedances=None,
                         ignore_errors=True,
                         skill_desc='',
                         skill_type=SkillType.ImpedanceControlSkill):
        '''Commands Arm to the given delta pose via linear interpolation

        Args:
            delta_tool_pose (RigidTransform) : Delta pose in tool frame
            duration (float) : How much time this robot motion should take
            stop_on_contact_forces (list): List of 6 floats corresponding to
                force limits on translation (xyz) and rotation about (xyz) axes.
                Default is None. If None then will not stop on contact.
        '''
        if delta_tool_pose.from_frame != 'franka_tool' \
                or delta_tool_pose.to_frame != 'franka_tool':
            raise ValueError('delta_pose has invalid frame names! ' \
                             'Make sure delta_pose has from_frame=franka_tool ' \
                             'and to_frame=franka_tool')

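        # conjugate the delta by the tool offset so the relative motion is
        # expressed in the franka_tool_base frame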
        delta_tool_base_pose = self._tool_delta_pose \
                * delta_tool_pose * self._tool_delta_pose.inverse()

        skill = GoToPoseDeltaSkill(skill_desc, skill_type)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if cartesian_impedances is not None:
            skill.add_cartesian_impedances(cartesian_impedances)
        else:
            skill.add_feedback_controller_params(
                FC.DEFAULT_TORQUE_CONTROLLER_PARAMS)

        if stop_on_contact_forces is not None:
            skill.add_contact_termination_params(FC.DEFAULT_TERM_BUFFER_TIME,
                                                 stop_on_contact_forces,
                                                 stop_on_contact_forces)
        else:
            skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        skill.add_relative_motion_with_quaternion(
            duration, delta_tool_base_pose.translation.tolist(),
            delta_tool_base_pose.quaternion.tolist())
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def goto_joints(self,
                    joints,
                    duration=5,
                    stop_on_contact_forces=None,
                    joint_impedances=None,
                    k_gains=None,
                    d_gains=None,
                    ignore_errors=True,
                    skill_desc='',
                    skill_type=SkillType.JointPositionSkill):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles
                           in radians
            duration (float): How much time this robot motion should take
            joint_impedances (list): A list of 7 numbers that represent the desired
                                     joint impedances for the internal robot joint
                                     controller

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
        '''

        return self._goto_joints(joints, duration, stop_on_contact_forces,
                                 joint_impedances, k_gains, d_gains,
                                 ignore_errors, skill_desc, skill_type)

    def goto_joints_with_joint_control(self,
                                       joints,
                                       duration=5,
                                       stop_on_contact_forces=None,
                                       joint_impedances=None,
                                       ignore_errors=True,
                                       skill_desc=''):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles
                           in radians
            duration (float): How much time this robot motion should take
            joint_impedances (list): A list of 7 numbers that represent the desired
                                     joint impedances for the internal robot joint
                                     controller

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
        '''

        return self._goto_joints(joints,
                                 duration,
                                 stop_on_contact_forces,
                                 joint_impedances,
                                 None,
                                 None,
                                 ignore_errors,
                                 skill_desc,
                                 skill_type=SkillType.JointPositionSkill)

    def goto_joints_with_impedance_control(self,
                                           joints,
                                           duration=5,
                                           stop_on_contact_forces=None,
                                           k_gains=None,
                                           d_gains=None,
                                           ignore_errors=True,
                                           skill_desc=''):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles
                           in radians
            duration (float): How much time this robot motion should take
            joint_impedances (list): A list of 7 numbers that represent the desired
                                     joint impedances for the internal robot joint
                                     controller

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
        '''

        return self._goto_joints(joints,
                                 duration,
                                 stop_on_contact_forces,
                                 None,
                                 k_gains,
                                 d_gains,
                                 ignore_errors,
                                 skill_desc,
                                 skill_type=SkillType.ImpedanceControlSkill)

    def _goto_joints(self,
                     joints,
                     duration=5,
                     stop_on_contact_forces=None,
                     joint_impedances=None,
                     k_gains=None,
                     d_gains=None,
                     ignore_errors=True,
                     skill_desc='',
                     skill_type=SkillType.JointPositionSkill):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles
                           in radians
            duration (float): How much time this robot motion should take
            joint_impedances (list): A list of 7 numbers that represent the desired
                                     joint impedances for the internal robot joint
                                     controller

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
        '''
        if not self.is_joints_reachable(joints):
            raise ValueError('Joints not reachable!')

        skill = GoToJointsSkill(skill_desc, skill_type)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if joint_impedances is not None:
            skill.add_joint_impedances(joint_impedances)
        elif k_gains is not None and d_gains is not None:
            skill.add_joint_gains(k_gains, d_gains)
        else:
            if (skill_type == SkillType.ImpedanceControlSkill):
                skill.add_joint_gains(FC.DEFAULT_K_GAINS, FC.DEFAULT_D_GAINS)
            else:
                skill.add_feedback_controller_params([])

        if stop_on_contact_forces is not None:
            skill.add_contact_termination_params(FC.DEFAULT_TERM_BUFFER_TIME,
                                                 stop_on_contact_forces,
                                                 stop_on_contact_forces)
        else:
            skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        skill.add_goal_joints(duration, joints)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def apply_joint_torques(self, torques, duration, ignore_errors=True):
        '''Commands Arm to apply given joint torques for duration seconds

        Args:
            torques (list): A list of 7 numbers that correspond to torques in Nm.
            duration (float): A float in the unit of seconds
        '''
        pass

    def execute_joint_dmp(self,
                          dmp_info,
                          duration,
                          ignore_errors=True,
                          skill_desc='',
                          skill_type=SkillType.JointPositionSkill):
        '''Commands Arm to execute a given dmp for duration seconds

        Args:
            dmp_info (dict): Contains all the parameters of a DMP
                (phi_j, tau, alpha, beta, num_basis, num_sensors, mu, h,
                and weights)
            duration (float): A float in the unit of seconds
        '''

        skill = JointDMPSkill(skill_desc, skill_type)
        skill.add_initial_sensor_values(dmp_info['phi_j'])  # sensor values
        # Doesn't matter because we overwrite it with the initial joint positions anyway
        y0 = [-0.282, -0.189, 0.0668, -2.186, 0.0524, 1.916, -1.06273]
        # Run time, tau, alpha, beta, num_basis, num_sensor_value, mu, h, weight
        trajectory_params = [
                duration, dmp_info['tau'], dmp_info['alpha'], dmp_info['beta'],
                float(dmp_info['num_basis']), float(dmp_info['num_sensors'])] \
                + dmp_info['mu'] \
                + dmp_info['h'] \
                + y0 \
                + np.array(dmp_info['weights']).reshape(-1).tolist()

        skill.add_trajectory_params(trajectory_params)
        skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def execute_pose_dmp(self,
                         dmp_info,
                         duration,
                         ignore_errors=True,
                         skill_desc='',
                         skill_type=SkillType.CartesianPoseSkill):
        '''Commands Arm to execute a given dmp for duration seconds

        Args:
            dmp_info (dict): Contains all the parameters of a DMP
                (phi_j, tau, alpha, beta, num_basis, num_sensors, mu, h,
                and weights)
            duration (float): A float in the unit of seconds
        '''

        skill = PoseDMPSkill(skill_desc, skill_type)
        skill.add_initial_sensor_values(dmp_info['phi_j'])  # sensor values
        # Doesn't matter because we overwrite it with the initial position anyway
        y0 = [0.0, 0.0, 0.0]
        # Run time, tau, alpha, beta, num_basis, num_sensor_value, mu, h, weight
        trajectory_params = [
                duration, dmp_info['tau'], dmp_info['alpha'], dmp_info['beta'],
                float(dmp_info['num_basis']), float(dmp_info['num_sensors'])] \
                + dmp_info['mu'] \
                + dmp_info['h'] \
                + y0 \
                + np.array(dmp_info['weights']).reshape(-1).tolist()

        skill.add_trajectory_params(trajectory_params)
        skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def execute_goal_pose_dmp(self,
                              dmp_info,
                              duration,
                              ignore_errors=True,
                              phi_j=None,
                              skill_desc='',
                              skill_type=SkillType.CartesianPoseSkill):
        '''Commands Arm to execute a given dmp for duration seconds

        Args:
            dmp_info (dict): Contains all the parameters of a DMP
                (phi_j, tau, alpha, beta, num_basis, num_sensors, mu, h,
                and weights)
            duration (float): A float in the unit of seconds
        '''

        skill = GoalPoseDMPSkill(skill_desc, skill_type)
        skill.add_initial_sensor_values(dmp_info['phi_j'])  # sensor values
        # Doesn't matter because we overwrite it with the initial position anyway
        y0 = [0.0, 0.0, 0.0]
        if phi_j is None:
            phi_j = np.array([[-0.025, 1.], [0, 0.], [-0.05, 1.0]])
        # Run time, tau, alpha, beta, num_basis, num_sensor_value, mu, h, weight
        trajectory_params = [
                duration, dmp_info['tau'], dmp_info['alpha'], dmp_info['beta'],
                float(dmp_info['num_basis']), float(dmp_info['num_sensors'])] \
                + dmp_info['mu'] \
                + dmp_info['h'] \
                + y0 \
                + np.array(dmp_info['weights']).reshape(-1).tolist() \
                + np.array(phi_j).reshape(-1).tolist()

        skill.add_trajectory_params(trajectory_params)
        skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def apply_effector_forces_torques(self,
                                      run_duration,
                                      acc_duration,
                                      max_translation,
                                      max_rotation,
                                      forces=None,
                                      torques=None,
                                      ignore_errors=True,
                                      skill_desc=''):
        '''Applies the given end-effector forces and torques in N and Nm

        Args:
            run_duration (float): A float in the unit of seconds
            acc_duration (float): A float in the unit of seconds. How long to
                acc/de-acc to achieve desired force.
            forces (list): Optional (defaults to None).
                A list of 3 numbers that correspond to end-effector forces in
                    3 directions
            torques (list): Optional (defaults to None).
                A list of 3 numbers that correspond to end-effector torques in
                    3 axes

        Raises:
            ValueError if acc_duration > 0.5*run_duration, or if forces are
                too large
        '''
        if acc_duration > 0.5 * run_duration:
            raise ValueError(
                'acc_duration must be smaller than half of run_duration!')

        forces = [0, 0, 0] if forces is None else np.array(forces).tolist()
        torques = [0, 0, 0] if torques is None else np.array(torques).tolist()

        if np.linalg.norm(forces) * run_duration > FC.MAX_LIN_MOMENTUM:
            raise ValueError('Linear momentum magnitude exceeds safety '
                             'threshold of {}'.format(FC.MAX_LIN_MOMENTUM))
        if np.linalg.norm(torques) * run_duration > FC.MAX_ANG_MOMENTUM:
            raise ValueError('Angular momentum magnitude exceeds safety '
                             'threshold of {}'.format(FC.MAX_ANG_MOMENTUM))

        skill = ForceTorqueSkill(skill_desc=skill_desc)
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_termination_params([0.1])

        skill.add_trajectory_params(
            [run_duration, acc_duration, max_translation, max_rotation] +
            forces + torques)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def apply_effector_forces_along_axis(self,
                                         run_duration,
                                         acc_duration,
                                         max_translation,
                                         forces,
                                         ignore_errors=True,
                                         skill_desc=''):
        '''Applies the given end-effector forces in N along a single axis

        Args:
            run_duration (float): A float in the unit of seconds
            acc_duration (float): A float in the unit of seconds.
                How long to acc/de-acc to achieve desired force.
            max_translation (float): Max translation before the robot
                decelerates.
            forces (list): A list of 3 numbers that correspond to the
                end-effector force vector; its direction defines the
                constrained axis
        Raises:
            ValueError if acc_duration > 0.5*run_duration, or if forces are
                too large
        '''
        if acc_duration > 0.5 * run_duration:
            raise ValueError(
                'acc_duration must be smaller than half of run_duration!')
        if np.linalg.norm(
                forces) * run_duration > FC.MAX_LIN_MOMENTUM_CONSTRAINED:
            raise ValueError('Linear momentum magnitude exceeds safety ' \
                    'threshold of {}'.format(FC.MAX_LIN_MOMENTUM_CONSTRAINED))

        forces = np.array(forces)
        force_axis = forces / np.linalg.norm(forces)

        skill = ForceAlongAxisSkill(skill_desc=skill_desc)
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)
        skill.add_termination_params([0.1])
        skill.add_feedback_controller_params(
            FC.DEFAULT_FORCE_AXIS_CONTROLLER_PARAMS + force_axis.tolist())

        init_params = [run_duration, acc_duration, max_translation, 0]
        skill.add_trajectory_params(init_params + forces.tolist() + [0, 0, 0])
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def goto_gripper(self, width, speed=0.04, force=None, ignore_errors=True):
        '''Commands gripper to goto a certain width, applying up to the given
            (default is max) force if needed

        Args:
            width (float): A float in the unit of meters
            speed (float): Gripper operation speed in meters per sec
            force (float): Max gripper force to apply in N. Default to None,
                which gives acceptable force
        Raises:
            ValueError: If width is less than 0 or greater than TODO(jacky)
                the maximum gripper opening
        '''
        skill = GripperSkill()
        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if force is not None:
            skill.add_trajectory_params([width, speed, force])
        else:
            skill.add_trajectory_params([width, speed])

        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)
        # this is so the gripper state can be updated, which happens with a
        # small lag
        sleep(FC.GRIPPER_CMD_SLEEP_TIME)

    def stay_in_position(self,
                         duration=3,
                         translational_stiffness=600,
                         rotational_stiffness=50,
                         k_gains=None,
                         d_gains=None,
                         cartesian_impedances=None,
                         joint_impedances=None,
                         ignore_errors=True,
                         skill_desc='',
                         skill_type=SkillType.ImpedanceControlSkill,
                         feedback_controller_type=FeedbackControllerType.
                         CartesianImpedanceFeedbackController):
        '''Commands the Arm to stay in its current position with provided
        translation and rotation stiffnesses

        Args:
            duration (float) : How much time the robot should stay in place in
                seconds.
            translational_stiffness (float): Translational stiffness factor used
                in the torque controller.
                Default is 600. A value of 0 will allow free translational
                movement.
            rotational_stiffness (float): Rotational stiffness factor used in
                the torque controller.
                Default is 50. A value of 0 will allow free rotational movement.
        '''
        skill = StayInInitialPoseSkill(skill_desc, skill_type,
                                       feedback_controller_type)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if skill_type == SkillType.ImpedanceControlSkill:
            if feedback_controller_type == FeedbackControllerType.CartesianImpedanceFeedbackController:
                if cartesian_impedances is not None:
                    skill.add_cartesian_impedances(cartesian_impedances)
                else:
                    skill.add_feedback_controller_params(
                        [translational_stiffness] + [rotational_stiffness])
            elif feedback_controller_type == FeedbackControllerType.JointImpedanceFeedbackController:
                if k_gains is not None and d_gains is not None:
                    skill.add_joint_gains(k_gains, d_gains)
                else:
                    skill.add_feedback_controller_params([])
            else:
                skill.add_feedback_controller_params(
                    [translational_stiffness] + [rotational_stiffness])
        elif skill_type == SkillType.CartesianPoseSkill:
            if cartesian_impedances is not None:
                skill.add_cartesian_impedances(cartesian_impedances)
            else:
                skill.add_feedback_controller_params([])
        elif skill_type == SkillType.JointPositionSkill:
            if joint_impedances is not None:
                skill.add_joint_impedances(joint_impedances)
            else:
                skill.add_feedback_controller_params([])
        else:
            skill.add_feedback_controller_params([translational_stiffness] +
                                                 [rotational_stiffness])

        skill.add_run_time(duration)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def run_guide_mode_with_selective_joint_compliance(
            self,
            duration=3,
            joint_impedances=None,
            k_gains=FC.DEFAULT_K_GAINS,
            d_gains=FC.DEFAULT_D_GAINS,
            ignore_errors=True,
            skill_desc='',
            skill_type=SkillType.ImpedanceControlSkill):
        '''Run guide mode with selective joint compliance given k and d gains
            for each joint

        Args:
            duration (float) : How much time the robot should be in selective
                               joint guide mode in seconds.
            k_gains (list): list of 7 k gains, one for each joint.
                            Default is [600., 600., 600., 600., 250., 150., 50.].
            d_gains (list): list of 7 d gains, one for each joint.
                            Default is [50.0, 50.0, 50.0, 50.0, 30.0, 25.0, 15.0].
        '''
        skill = StayInInitialPoseSkill(
            skill_desc, skill_type,
            FeedbackControllerType.JointImpedanceFeedbackController)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if skill_type == SkillType.ImpedanceControlSkill:
            if k_gains is not None and d_gains is not None:
                skill.add_joint_gains(k_gains, d_gains)
        elif skill_type == SkillType.JointPositionSkill:
            if joint_impedances is not None:
                skill.add_joint_impedances(joint_impedances)
            else:
                skill.add_feedback_controller_params([])

        skill.add_run_time(duration)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def run_guide_mode_with_selective_pose_compliance(
            self,
            duration=3,
            translational_stiffnesses=FC.DEFAULT_TRANSLATIONAL_STIFFNESSES,
            rotational_stiffnesses=FC.DEFAULT_ROTATIONAL_STIFFNESSES,
            cartesian_impedances=None,
            ignore_errors=True,
            skill_desc='',
            skill_type=SkillType.ImpedanceControlSkill):
        '''Run guide mode with selective pose compliance given translational
        and rotational stiffnesses

        Args:
            duration (float) : How much time the robot should be in selective
                pose guide mode in seconds.
            translational_stiffnesses (list): list of 3 translational stiffnesses,
                one for each axis (x, y, z). Default is 600.0, 600.0, 600.0.
            rotational_stiffnesses (list): list of 3 rotational stiffnesses,
                one for each axis (roll, pitch, yaw). Default is 50.0, 50.0, 50.0.
        '''
        skill = StayInInitialPoseSkill(
            skill_desc, skill_type,
            FeedbackControllerType.CartesianImpedanceFeedbackController)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if skill_type == SkillType.ImpedanceControlSkill:
            if cartesian_impedances is not None:
                skill.add_cartesian_impedances(cartesian_impedances)
            else:
                skill.add_feedback_controller_params(
                    translational_stiffnesses + rotational_stiffnesses)
        elif skill_type == SkillType.CartesianPoseSkill:
            if cartesian_impedances is not None:
                skill.add_cartesian_impedances(cartesian_impedances)
            else:
                skill.add_feedback_controller_params([])

        skill.add_run_time(duration)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def run_dynamic_joint_position_interpolation(
            self,
            joints,
            duration=5,
            stop_on_contact_forces=None,
            joint_impedances=None,
            k_gains=None,
            d_gains=None,
            ignore_errors=True,
            skill_desc='',
            skill_type=SkillType.JointPositionDynamicInterpolationSkill):
        '''Commands Arm to the given joint configuration

        Args:
            joints (list): A list of 7 numbers that correspond to joint angles
                           in radians
            duration (float): How much time this robot motion should take
            joint_impedances (list): A list of 7 numbers that represent the desired
                                     joint impedances for the internal robot joint
                                     controller

        Raises:
            ValueError: If is_joints_reachable(joints) returns False
        '''
        if not self.is_joints_reachable(joints):
            raise ValueError('Joints not reachable!')

        skill = GoToJointsSkill(skill_desc, skill_type)

        skill.add_initial_sensor_values(FC.EMPTY_SENSOR_VALUES)

        if joint_impedances is not None:
            skill.add_joint_impedances(joint_impedances)
        elif k_gains is not None and d_gains is not None:
            skill.add_joint_gains(k_gains, d_gains)
        else:
            if (skill_type == SkillType.ImpedanceControlSkill):
                skill.add_joint_gains(FC.DEFAULT_K_GAINS, FC.DEFAULT_D_GAINS)
            else:
                skill.add_feedback_controller_params([])

        if stop_on_contact_forces is not None:
            skill.add_contact_termination_params(FC.DEFAULT_TERM_BUFFER_TIME,
                                                 stop_on_contact_forces,
                                                 stop_on_contact_forces)
        else:
            skill.add_termination_params([FC.DEFAULT_TERM_BUFFER_TIME])

        skill.add_goal_joints(duration, joints)
        goal = skill.create_goal()

        self._send_goal(goal,
                        cb=lambda x: skill.feedback_callback(x),
                        ignore_errors=ignore_errors)

    def open_gripper(self):
        '''Opens gripper to maximum width
        '''
        self.goto_gripper(FC.GRIPPER_WIDTH_MAX)

    def close_gripper(self, grasp=True):
        '''Closes the gripper as much as possible
        '''
        self.goto_gripper(FC.GRIPPER_WIDTH_MIN,
                          force=FC.GRIPPER_MAX_FORCE if grasp else None)

    def run_guide_mode(self, duration=100):
        self.apply_effector_forces_torques(duration, 0, 0, 0)

    '''
    Reads
    '''

    def get_robot_state(self):
        '''
        Returns:
            dict of full robot state data
        '''
        return self._state_client.get_data()

    def get_pose(self):
        '''
        Returns:
            pose (RigidTransform) of the current end-effector
        '''
        tool_base_pose = self._state_client.get_pose()

        tool_pose = tool_base_pose * self._tool_delta_pose

        return tool_pose

    def get_joints(self):
        '''
        Returns:
            ndarray of shape (7,) of joint angles in radians
        '''
        return self._state_client.get_joints()

    def get_joint_torques(self):
        '''
        Returns:
            ndarray of shape (7,) of joint torques in Nm
        '''
        return self._state_client.get_joint_torques()

    def get_joint_velocities(self):
        '''
        Returns:
            ndarray of shape (7,) of joint velocities in rads/s
        '''
        return self._state_client.get_joint_velocities()

    def get_gripper_width(self):
        '''
        Returns:
            float of gripper width in meters
        '''
        return self._state_client.get_gripper_width()

    def get_gripper_is_grasped(self):
        '''
        Returns:
            True if gripper is grasping something. False otherwise
        '''
        return self._state_client.get_gripper_is_grasped()

    def get_speed(self, speed):
        '''
        Returns:
            float of current target speed parameter
        '''
        pass

    def get_tool_base_pose(self):
        '''
        Returns:
            RigidTransform of current tool base pose
        '''
        return self._tool_delta_pose.copy()

    '''
    Sets
    '''

    def set_tool_delta_pose(self, tool_delta_pose):
        '''Sets the tool pose relative to the end-effector pose

        Args:
            tool_delta_pose (RigidTransform)
        '''
        if tool_delta_pose.from_frame != 'franka_tool' \
                or tool_delta_pose.to_frame != 'franka_tool_base':
            raise ValueError('tool_delta_pose has invalid frame names! ' \
                             'Make sure it has from_frame=franka_tool, and ' \
                             'to_frame=franka_tool_base')

        self._tool_delta_pose = tool_delta_pose.copy()

    def set_speed(self, speed):
        '''Sets current target speed parameter

        Args:
            speed (float)
        '''
        pass

    '''
    Misc
    '''

    def reset_joints(self, skill_desc='', ignore_errors=True):
        '''Commands Arm to goto hardcoded home joint configuration
        '''
        self.goto_joints(FC.HOME_JOINTS,
                         duration=5,
                         skill_desc=skill_desc,
                         ignore_errors=ignore_errors)

    def reset_pose(self, skill_desc='', ignore_errors=True):
        '''Commands Arm to goto hardcoded home pose
        '''
        self.goto_pose(FC.HOME_POSE,
                       duration=5,
                       skill_desc=skill_desc,
                       ignore_errors=ignore_errors)

    def is_joints_reachable(self, joints):
        '''
        Returns:
            True if all joints within joint limits
        '''
        for i, val in enumerate(joints):
            if val <= FC.JOINT_LIMITS_MIN[i] or val >= FC.JOINT_LIMITS_MAX[i]:
                return False

        return True
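A hedged usage sketch for the class above (assumes a control PC running robolib with the action/state servers listed in __init__; the target pose values are made up):

import numpy as np
from autolab_core import RigidTransform

fa = FrankaArm()
fa.reset_joints()
fa.open_gripper()

# goto_pose expects from_frame='franka_tool' and to_frame='world'
target = RigidTransform(rotation=np.eye(3),
                        translation=np.array([0.4, 0.0, 0.3]),
                        from_frame='franka_tool', to_frame='world')
fa.goto_pose(target, duration=5)
fa.goto_gripper(0.02, speed=0.04)
print(fa.get_joints())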
Example #20
0
def main():
    # ====================================
    # parse arguments
    # ====================================
    parser = argparse.ArgumentParser(
        description='Generate segmentation images and updated json files.')
    parser.add_argument('-d',
                        '--data-dir',
                        default=os.getcwd(),
                        help='directory containing images and json files')
    parser.add_argument(
        '-o',
        '--object-settings',
        help='object_settings file where the fixed_model_transform corresponds '
             'to the poses in the frame annotation jsons. '
             'default: <data_dir>/_object_settings.json')
    parser.add_argument(
        '-t',
        '--target-object-settings',
        help=
        'if given, transform all poses into the fixed_model_transform from this file.'
    )
    args = parser.parse_args()

    if not args.object_settings:
        args.object_settings = os.path.join(args.data_dir,
                                            '_object_settings.json')
    if not args.target_object_settings:
        args.target_object_settings = args.object_settings

    # =====================
    # parse object_settings
    # =====================
    with open(args.object_settings, 'r') as f:
        object_settings_json = json.load(f)
    with open(args.target_object_settings, 'r') as f:
        target_object_settings_json = json.load(f)

    if len(object_settings_json['exported_objects']) != len(
            target_object_settings_json['exported_objects']):
        print("FATAL: object_settings and target_object_settings do not match!")
        sys.exit(-1)
    models = {}
    for model_json, target_model_json in zip(
            object_settings_json['exported_objects'],
            target_object_settings_json['exported_objects']):
        class_name = model_json['class']
        if class_name != target_model_json['class']:
            print("FATAL: object_settings and target_object_settings do not match!")
            sys.exit(-1)
        segmentation_class_id = model_json['segmentation_class_id']
        cuboid_dimensions = np.array(model_json['cuboid_dimensions'])

        # calculate model_transform
        fixed_model_transform_mat = np.transpose(
            np.array(model_json['fixed_model_transform']))
        fixed_model_transform = RigidTransform(
            rotation=fixed_model_transform_mat[:3, :3],
            translation=fixed_model_transform_mat[:3, 3],
            from_frame='ycb_model',
            to_frame='source_model')
        target_fixed_model_transform_mat = np.transpose(
            np.array(target_model_json['fixed_model_transform']))
        target_fixed_model_transform = RigidTransform(
            rotation=target_fixed_model_transform_mat[:3, :3],
            translation=target_fixed_model_transform_mat[:3, 3],
            from_frame='ycb_model',
            to_frame='target_model_original')
        model_transform = fixed_model_transform.dot(
            target_fixed_model_transform.inverse())

        models[class_name] = ModelConfig(class_name, segmentation_class_id,
                                         model_transform, cuboid_dimensions)

    # ==================================================
    # parse camera_settings and set up camera intrinsics
    # ==================================================
    with open(os.path.join(args.data_dir, '_camera_settings.json'), 'r') as f:
        camera_settings_json = json.load(f)['camera_settings'][0]

    camera_intrinsics = CameraIntrinsics(
        frame='camera',
        fx=camera_settings_json['intrinsic_settings']['fx'],
        fy=camera_settings_json['intrinsic_settings']['fy'],
        cx=camera_settings_json['intrinsic_settings']['cx'],
        cy=camera_settings_json['intrinsic_settings']['cy'],
        skew=camera_settings_json['intrinsic_settings']['s'],
        height=camera_settings_json['captured_image_size']['height'],
        width=camera_settings_json['captured_image_size']['width'])

    # ====================================
    # process each frame
    # ====================================
    pattern = re.compile(r'\d{3,}.json')
    json_files = sorted([
        os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir)
        if pattern.match(f)
    ])
    for json_file in json_files:
        filename_prefix = json_file[:-len('json')]
        print('\n---------------------- {}*'.format(filename_prefix))
        with open(json_file, 'r') as f:
            frame_json = json.load(f)

        updated_frame_json = process_frame(frame_json, camera_intrinsics,
                                           models)
        with open(filename_prefix + 'flipped.json', 'w') as f:
            json.dump(updated_frame_json, f, indent=2, sort_keys=True)
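A hedged invocation sketch for the script above (the script filename is a placeholder; only the flags defined by the argparse setup are used):

# python process_frames.py \
#     --data-dir /path/to/exported/frames \
#     --target-object-settings /path/to/other/_object_settings.json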
Example #21
0
                        T_obj_world.inverse(),
                        mat_props=table_mat_props)
        }
        for name, scene_obj in scene_objs.iteritems():
            virtual_camera.add_to_scene(name, scene_obj)

        # camera pose
        cam_dist = 0.3
        T_camera_world = RigidTransform(rotation=np.array([[0, 1,
                                                            0], [1, 0, 0],
                                                           [0, 0, -1]]),
                                        translation=[0, 0, cam_dist],
                                        from_frame=camera_intr.frame,
                                        to_frame='world')

        T_obj_camera = T_camera_world.inverse() * T_obj_world

        # show mesh
        if False:
            vis3d.figure()
            vis3d.mesh(mesh, T_obj_camera)
            vis3d.pose(RigidTransform(), alpha=0.1)
            vis3d.pose(T_obj_camera, alpha=0.1)
            vis3d.show()

        # render depth image
        render_start = time.time()
        IPython.embed()
        renders = virtual_camera.wrapped_images(mesh, [T_obj_camera],
                                                RenderMode.RGBD_SCENE,
                                                mat_props=mat_props,