コード例 #1
0
            # Look up the world-frame point the gripper should visit
            # (target_ind[0] indexes into the chessboard corner points).
            target_pt_world = cb_points_world[target_ind[0]]

            # create robot pose relative to target point
            # Fixed gripper orientation: pointing straight down
            # (180-degree rotation about the x-axis).
            R_gripper_world = np.array([[1.0, 0, 0],
                                     [0, -1.0, 0],
                                     [0, 0, -1.0]])
            # Translate the target point by the configured tool offsets.
            t_gripper_world = np.array([target_pt_world.x + config['gripper_offset_x'],
                                        target_pt_world.y + config['gripper_offset_y'],
                                        target_pt_world.z + config['gripper_offset_z']])
            # NOTE(review): to_frame is 'cb' here, but the equivalent code in
            # the other register_ensenso variants uses 'world' — confirm which
            # frame name the rest of this script expects.
            T_gripper_world = RigidTransform(rotation=R_gripper_world,
                                             translation=t_gripper_world,
                                             from_frame='gripper',
                                             to_frame='cb')
            logging.info('Moving robot to point x=%f, y=%f, z=%f' %(t_gripper_world[0], t_gripper_world[1], t_gripper_world[2]))

            # move robot to pose
            y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
            y.reset_home()
            time.sleep(1)

            # Approach pose 5 cm above the target so the gripper descends
            # vertically onto the point instead of sweeping sideways into it.
            T_lift = RigidTransform(translation=(0,0,0.05), from_frame='cb', to_frame='cb')
            T_gripper_world_lift = T_lift * T_gripper_world
            y.right.goto_pose(T_gripper_world_lift)
            y.right.goto_pose(T_gripper_world)

            # wait for human measurement
            # raw_input: this fragment targets Python 2.
            yesno = raw_input('Take measurement. Hit [ENTER] when done')
            # Retract to the lift pose, then home, before releasing the robot.
            y.right.goto_pose(T_gripper_world_lift)
            y.reset_home()
            y.stop()
コード例 #2
0
def register_ensenso(config):
    """Register an Ensenso depth sensor to the world frame.

    Detects a calibration pattern through the Ensenso ROS services,
    estimates the camera pose from a known pattern-to-world transform,
    saves the resulting transform and camera intrinsics to disk, and
    optionally moves a YuMi gripper to the pattern center so a human can
    verify the registration.

    Parameters
    ----------
    config : dict-like
        Must provide 'num_patterns', 'max_tries', 'pattern_tf',
        'sensor_frame', 'calib_dir', 'use_robot' and, when the robot is
        used, 'gripper_offset_x', 'gripper_offset_y', 'gripper_offset_z'.

    Raises
    ------
    ValueError
        If the calibration pattern is not detected within 'max_tries'
        attempts.
    rospy.ServiceException
        If a ROS service call to the sensor fails.
    """
    # set parameters for the Ensenso pattern-collection services
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1  # negative lets the sensor auto-detect grid spacing

    # read parameters
    num_patterns = config["num_patterns"]
    max_tries = config["max_tries"]

    # load tf pattern to world (known from the physical calibration setup)
    T_pattern_world = RigidTransform.load(config["pattern_tf"])

    # initialize sensor
    sensor_frame = config["sensor_frame"]
    sensor_config = {"frame": sensor_frame}
    logging.info("Creating sensor")
    sensor = RgbdSensorFactory.sensor("ensenso", sensor_config)

    # initialize node and wait for the pattern services to come up
    rospy.init_node("register_ensenso", anonymous=True)
    rospy.wait_for_service("/%s/collect_pattern" % (sensor_frame))
    rospy.wait_for_service("/%s/estimate_pattern_pose" % (sensor_frame))

    # start sensor
    print("Starting sensor")
    sensor.start()
    time.sleep(1)
    print("Sensor initialized")

    # perform registration
    try:
        print("Collecting patterns")
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy(
                "/%s/collect_pattern" % (sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer, clear_buffer, decode,
                                   grid_spacing)
            if resp.success:
                print("Detected pattern %d" % (num_detected))
                num_detected += 1
            i += 1

        # Check the actual success criterion: enough patterns detected.
        # (Testing i == max_tries would falsely fail when the final
        # attempt succeeds exactly at the try limit.)
        if num_detected < num_patterns:
            raise ValueError("Failed to detect calibration pattern!")

        print("Estimating pattern pose")
        estimate_pattern_pose = rospy.ServiceProxy(
            "/%s/estimate_pattern_pose" % (sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        # pattern pose in the camera frame (quaternion in w, x, y, z order)
        q_pattern_camera = np.array([
            resp.pose.orientation.w,
            resp.pose.orientation.x,
            resp.pose.orientation.y,
            resp.pose.orientation.z,
        ])
        t_pattern_camera = np.array(
            [resp.pose.position.x, resp.pose.position.y, resp.pose.position.z])
        T_pattern_camera = RigidTransform(
            rotation=q_pattern_camera,
            translation=t_pattern_camera,
            from_frame="pattern",
            to_frame=sensor_frame,
        )
    except rospy.ServiceException as e:
        print("Service call failed: %s" % (str(e)))
        # Without a pattern pose there is nothing to register; stop the
        # sensor and re-raise instead of falling through to a NameError
        # on the undefined T_pattern_camera.
        sensor.stop()
        raise

    # compute final transformation: camera pose in the world frame
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()

    # save final transformation
    output_dir = os.path.join(config["calib_dir"], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, "%s_to_world.tf" % (sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, "%s.intr" % (sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print("Final Result for sensor %s" % (sensor_frame))
    print("Rotation: ")
    print(T_camera_world.rotation)
    print("Quaternion: ")
    print(T_camera_world.quaternion)
    print("Translation: ")
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config["use_robot"]:
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        # gripper pointing straight down (180-degree rotation about x)
        R_gripper_world = np.array([[1.0, 0, 0], [0, -1.0, 0], [0, 0, -1.0]])
        t_gripper_world = np.array([
            target_pt_world.x + config["gripper_offset_x"],
            target_pt_world.y + config["gripper_offset_y"],
            target_pt_world.z + config["gripper_offset_z"],
        ])
        T_gripper_world = RigidTransform(
            rotation=R_gripper_world,
            translation=t_gripper_world,
            from_frame="gripper",
            to_frame="world",
        )

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        # approach 5 cm above the target so the gripper descends vertically
        T_lift = RigidTransform(translation=(0, 0, 0.05),
                                from_frame="world",
                                to_frame="world")
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)

        # wait for human measurement
        keyboard_input("Take measurement. Hit [ENTER] when done")
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()
コード例 #3
0
def playback(args):
    """Replay a recorded YuMi demonstration trajectory.

    Loads a recorded demo (poses or joint states plus gripper events),
    optionally subsamples and drops stationary steps, replays the
    trajectory on the robot, and optionally records joint torques during
    playback.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide config_path, demo_name, supervisor and trial_num.

    Raises
    ------
    ValueError
        If cfg['mode'] is not 'poses' or 'states'.
    """
    cfg = YamlConfig(args.config_path)
    demo_name = args.demo_name
    supervisor = args.supervisor
    trial_num = args.trial_num

    # Validate the mode before creating the robot. (The robot object does
    # not exist yet here, so there is nothing to stop on failure; the
    # accepted values are 'poses' and 'states'.)
    if cfg['mode'] not in ('poses', 'states'):
        raise ValueError("Unknown playback mode! Only accepts 'poses' or 'states'. Got {0}".format(cfg['mode']))

    # init robot
    logging.info("Init robot.")
    y = YuMiRobot()
    y.set_v(cfg['v'])
    y.set_z(cfg['z'])

    # load demo data and look up this trial's record
    demo_records = CSVModel.load(os.path.join(cfg['data_path'], 'demo_records.csv'))
    demo_record = demo_records.get_by_cols({
        'demo_name': demo_name,
        'trial_num': trial_num,
        'supervisor': supervisor
    })
    trial_path = demo_record['trial_path']
    demo_host_cfg = YamlConfig(os.path.join(trial_path, 'demo_config.yaml'))

    # parse demo trajectory
    # TODO: enforce fps
    fps = demo_host_cfg['fps']

    # each .jb file holds (timestamp, payload) pairs; keep only payloads
    _, left_data = zip(*load(os.path.join(trial_path, '{0}_left.jb'.format(cfg['mode']))))
    _, right_data = zip(*load(os.path.join(trial_path, '{0}_right.jb'.format(cfg['mode']))))
    _, gripper_left_evs = zip(*load(os.path.join(trial_path, 'grippers_evs_left.jb')))
    _, gripper_right_evs = zip(*load(os.path.join(trial_path, 'grippers_evs_right.jb')))

    seqs = {
        'left': Sequence([t[1] for t in left_data]),
        'right': Sequence([t[1] for t in right_data]),
        'gripper_left': Sequence(gripper_left_evs),
        'gripper_right': Sequence(gripper_right_evs)
    }

    # subsampling (gripper events are features that must survive subsampling)
    subsample_factor = cfg['subsample']
    subsampled_seqs = {
        'left': seqs['left'].subsampler(subsample_factor),
        'right': seqs['right'].subsampler(subsample_factor),
        'gripper_left': seqs['gripper_left'].subsampler(subsample_factor, retain_features=True),
        'gripper_right': seqs['gripper_right'].subsampler(subsample_factor, retain_features=True)
    }

    # drop consecutive steps where neither arm moved and no gripper event fired
    if cfg['concat']:
        concat_data = {
            'left': [subsampled_seqs['left'].data[0]],
            'right': [subsampled_seqs['right'].data[0]],
            'gripper_left': [subsampled_seqs['gripper_left'].data[0]],
            'gripper_right': [subsampled_seqs['gripper_right'].data[0]]
        }
        last_lp = concat_data['left'][0]
        last_rp = concat_data['right'][0]
        for t in range(1, min([len(seq.data) for seq in subsampled_seqs.values()])):
            lg_t = subsampled_seqs['gripper_left'].data[t]
            rg_t = subsampled_seqs['gripper_right'].data[t]
            lp_t = subsampled_seqs['left'].data[t]
            rp_t = subsampled_seqs['right'].data[t]

            # keep the step if a gripper event fired or either arm moved
            if lg_t is not None or rg_t is not None or \
                lp_t != last_lp or rp_t != last_rp:
                concat_data['gripper_right'].append(rg_t)
                concat_data['gripper_left'].append(lg_t)
                concat_data['left'].append(lp_t)
                concat_data['right'].append(rp_t)

            last_lp = lp_t
            last_rp = rp_t
        concat_seqs = {
            'left': Sequence(concat_data['left']),
            'right': Sequence(concat_data['right']),
            'gripper_left': Sequence(concat_data['gripper_left']),
            'gripper_right': Sequence(concat_data['gripper_right']),
        }
    else:
        concat_seqs = subsampled_seqs

    N = min([len(seq.data) for seq in concat_seqs.values()])

    # processing time steps where zoning should be set to fine:
    # switch to 'fine' one step before a gripper event, back to the
    # configured zone one step after it
    gripper_zoning = [None for _ in range(N)]
    for t in range(N-1):
        if concat_seqs['gripper_left'].data[t] is not None or \
            concat_seqs['gripper_right'].data[t] is not None:
            if t == 0:
                y.set_z('fine')
            else:
                gripper_zoning[t-1] = 'fine'
            gripper_zoning[t+1] = cfg['z']

    # perform setup motions
    logging.info("Loading demo and performing setups.")
    y.reset_home()
    y.open_grippers()
    demo_path = os.path.join(trial_path, '{0}.py'.format(demo_name))
    demo_obj = DemoWrapper.load(demo_path, y)
    demo_obj.setup()

    # record torque and other debug data if needed
    if cfg['record_torque']['use']:
        ysub = YuMiSubscriber()
        ysub.start()
        data_torque_left = DataStreamRecorder('torques_left', ysub.left.get_torque, cache_path=cfg['cache_path'], save_every=cfg['save_every'])
        data_torque_right = DataStreamRecorder('torques_right', ysub.right.get_torque, cache_path=cfg['cache_path'], save_every=cfg['save_every'])
        syncer = DataStreamSyncer([data_torque_left, data_torque_right], fps)
        syncer.start()
        sleep(0.5)
        # flush anything captured during startup so recording begins at t=0
        syncer.pause()
        syncer.flush()
        syncer.resume(reset_time=True)

    # perform trajectory
    logging.info("Playing trajectory")
    for t in range(N):
        left_item = concat_seqs['left'].data[t]
        right_item = concat_seqs['right'].data[t]
        gripper_left_item = concat_seqs['gripper_left'].data[t]
        gripper_right_item = concat_seqs['gripper_right'].data[t]

        # command both arms; only wait on the right arm so they move together
        if cfg['mode'] == 'poses':
            y.left.goto_pose(left_item, relative=True, wait_for_res=False)
            y.right.goto_pose(right_item, relative=True, wait_for_res=True)
        else:
            y.left.goto_state(left_item, wait_for_res=False)
            y.right.goto_state(right_item, wait_for_res=True)

        # gripper events are method names on the arm interface
        if gripper_left_item is not None and gripper_right_item is not None:
            getattr(y.left, gripper_left_item)(wait_for_res=False)
            getattr(y.right, gripper_right_item)(wait_for_res=True)
        elif gripper_left_item is not None:
            getattr(y.left, gripper_left_item)()
        elif gripper_right_item is not None:
            getattr(y.right, gripper_right_item)()

        z = gripper_zoning[t]
        if z is not None:
            logging.info("Setting zone to {0}".format(z))
            y.set_z(z)

    if cfg['record_torque']['use']:
        syncer.pause()

        torque_model = CSVModel.get_or_create(
            os.path.join(cfg['data_path'], 'playback_torques_record.csv'),
            [
                ('supervisor', 'str'),
                ('demo_name', 'str'),
                ('trial_num', 'int'),
                ('playback_num', 'int'),
                ('playback_path', 'str')
            ]
        )

        # number this playback one past the most recent record for the trial
        last_torque_record = torque_model.get_by_cols({
                                                    'demo_name': demo_name,
                                                    'trial_num': trial_num,
                                                    'supervisor': supervisor
                                                }, direction=-1)
        if last_torque_record is None:
            playback_num = 1
        else:
            playback_num = last_torque_record['playback_num'] + 1

        playback_path = os.path.join(trial_path, 'playback_torques', str(playback_num))
        if not os.path.exists(playback_path):
            os.makedirs(playback_path)

        data_torque_left.save_data(playback_path)
        data_torque_right.save_data(playback_path)

        torque_model.insert({
            'supervisor': supervisor,
            'demo_name': demo_name,
            'trial_num': trial_num,
            'playback_num': playback_num,
            'playback_path': playback_path
        })

        # archive the config used for this playback next to the torque data
        basename = os.path.basename(args.config_path)
        target_file_path = os.path.join(playback_path, basename)
        shutil.copyfile(args.config_path, target_file_path)

        ysub.stop()
        syncer.stop()

    # perform takedown motions
    logging.info("Taking down..")
    y.set_v(cfg['v'])
    y.set_z(cfg['z'])
    demo_obj.takedown()

    y.reset_home()
    y.open_grippers()

    y.stop()
コード例 #4
0
def playback(args):
    """Replay a recorded YuMi demonstration trajectory.

    Loads a recorded demo (poses or joint states plus gripper events),
    optionally subsamples and drops stationary steps, replays the
    trajectory on the robot, and optionally records joint torques during
    playback.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide config_path, demo_name, supervisor and trial_num.

    Raises
    ------
    ValueError
        If cfg['mode'] is not 'poses' or 'states'.
    """
    cfg = YamlConfig(args.config_path)
    demo_name = args.demo_name
    supervisor = args.supervisor
    trial_num = args.trial_num

    # Validate the mode before creating the robot. (The robot object does
    # not exist yet here, so there is nothing to stop on failure; the
    # accepted values are 'poses' and 'states'.)
    if cfg['mode'] not in ('poses', 'states'):
        raise ValueError(
            "Unknown playback mode! Only accepts 'poses' or 'states'. Got {0}".
            format(cfg['mode']))

    # init robot
    logging.info("Init robot.")
    y = YuMiRobot()
    y.set_v(cfg['v'])
    y.set_z(cfg['z'])

    # load demo data and look up this trial's record
    demo_records = CSVModel.load(
        os.path.join(cfg['data_path'], 'demo_records.csv'))
    demo_record = demo_records.get_by_cols({
        'demo_name': demo_name,
        'trial_num': trial_num,
        'supervisor': supervisor
    })
    trial_path = demo_record['trial_path']
    demo_host_cfg = YamlConfig(os.path.join(trial_path, 'demo_config.yaml'))

    # parse demo trajectory
    # TODO: enforce fps
    fps = demo_host_cfg['fps']

    # each .jb file holds (timestamp, payload) pairs; keep only payloads
    _, left_data = zip(
        *load(os.path.join(trial_path, '{0}_left.jb'.format(cfg['mode']))))
    _, right_data = zip(
        *load(os.path.join(trial_path, '{0}_right.jb'.format(cfg['mode']))))
    _, gripper_left_evs = zip(
        *load(os.path.join(trial_path, 'grippers_evs_left.jb')))
    _, gripper_right_evs = zip(
        *load(os.path.join(trial_path, 'grippers_evs_right.jb')))

    seqs = {
        'left': Sequence([t[1] for t in left_data]),
        'right': Sequence([t[1] for t in right_data]),
        'gripper_left': Sequence(gripper_left_evs),
        'gripper_right': Sequence(gripper_right_evs)
    }

    # subsampling (gripper events are features that must survive subsampling)
    subsample_factor = cfg['subsample']
    subsampled_seqs = {
        'left':
        seqs['left'].subsampler(subsample_factor),
        'right':
        seqs['right'].subsampler(subsample_factor),
        'gripper_left':
        seqs['gripper_left'].subsampler(subsample_factor,
                                        retain_features=True),
        'gripper_right':
        seqs['gripper_right'].subsampler(subsample_factor,
                                         retain_features=True)
    }

    # drop consecutive steps where neither arm moved and no gripper event fired
    if cfg['concat']:
        concat_data = {
            'left': [subsampled_seqs['left'].data[0]],
            'right': [subsampled_seqs['right'].data[0]],
            'gripper_left': [subsampled_seqs['gripper_left'].data[0]],
            'gripper_right': [subsampled_seqs['gripper_right'].data[0]]
        }
        last_lp = concat_data['left'][0]
        last_rp = concat_data['right'][0]
        for t in range(
                1, min([len(seq.data) for seq in subsampled_seqs.values()])):
            lg_t = subsampled_seqs['gripper_left'].data[t]
            rg_t = subsampled_seqs['gripper_right'].data[t]
            lp_t = subsampled_seqs['left'].data[t]
            rp_t = subsampled_seqs['right'].data[t]

            # keep the step if a gripper event fired or either arm moved
            if lg_t is not None or rg_t is not None or \
                lp_t != last_lp or rp_t != last_rp:
                concat_data['gripper_right'].append(rg_t)
                concat_data['gripper_left'].append(lg_t)
                concat_data['left'].append(lp_t)
                concat_data['right'].append(rp_t)

            last_lp = lp_t
            last_rp = rp_t
        concat_seqs = {
            'left': Sequence(concat_data['left']),
            'right': Sequence(concat_data['right']),
            'gripper_left': Sequence(concat_data['gripper_left']),
            'gripper_right': Sequence(concat_data['gripper_right']),
        }
    else:
        concat_seqs = subsampled_seqs

    N = min([len(seq.data) for seq in concat_seqs.values()])

    # processing time steps where zoning should be set to fine:
    # switch to 'fine' one step before a gripper event, back to the
    # configured zone one step after it
    gripper_zoning = [None for _ in range(N)]
    for t in range(N - 1):
        if concat_seqs['gripper_left'].data[t] is not None or \
            concat_seqs['gripper_right'].data[t] is not None:
            if t == 0:
                y.set_z('fine')
            else:
                gripper_zoning[t - 1] = 'fine'
            gripper_zoning[t + 1] = cfg['z']

    # perform setup motions
    logging.info("Loading demo and performing setups.")
    y.reset_home()
    y.open_grippers()
    demo_path = os.path.join(trial_path, '{0}.py'.format(demo_name))
    demo_obj = DemoWrapper.load(demo_path, y)
    demo_obj.setup()

    # record torque and other debug data if needed
    if cfg['record_torque']['use']:
        ysub = YuMiSubscriber()
        ysub.start()
        data_torque_left = DataStreamRecorder('torques_left',
                                              ysub.left.get_torque,
                                              cache_path=cfg['cache_path'],
                                              save_every=cfg['save_every'])
        data_torque_right = DataStreamRecorder('torques_right',
                                               ysub.right.get_torque,
                                               cache_path=cfg['cache_path'],
                                               save_every=cfg['save_every'])
        syncer = DataStreamSyncer([data_torque_left, data_torque_right], fps)
        syncer.start()
        sleep(0.5)
        # flush anything captured during startup so recording begins at t=0
        syncer.pause()
        syncer.flush()
        syncer.resume(reset_time=True)

    # perform trajectory
    logging.info("Playing trajectory")
    for t in range(N):
        left_item = concat_seqs['left'].data[t]
        right_item = concat_seqs['right'].data[t]
        gripper_left_item = concat_seqs['gripper_left'].data[t]
        gripper_right_item = concat_seqs['gripper_right'].data[t]

        # command both arms; only wait on the right arm so they move together
        if cfg['mode'] == 'poses':
            y.left.goto_pose(left_item, relative=True, wait_for_res=False)
            y.right.goto_pose(right_item, relative=True, wait_for_res=True)
        else:
            y.left.goto_state(left_item, wait_for_res=False)
            y.right.goto_state(right_item, wait_for_res=True)

        # gripper events are method names on the arm interface
        if gripper_left_item is not None and gripper_right_item is not None:
            getattr(y.left, gripper_left_item)(wait_for_res=False)
            getattr(y.right, gripper_right_item)(wait_for_res=True)
        elif gripper_left_item is not None:
            getattr(y.left, gripper_left_item)()
        elif gripper_right_item is not None:
            getattr(y.right, gripper_right_item)()

        z = gripper_zoning[t]
        if z is not None:
            logging.info("Setting zone to {0}".format(z))
            y.set_z(z)

    if cfg['record_torque']['use']:
        syncer.pause()

        torque_model = CSVModel.get_or_create(
            os.path.join(cfg['data_path'], 'playback_torques_record.csv'),
            [('supervisor', 'str'), ('demo_name', 'str'), ('trial_num', 'int'),
             ('playback_num', 'int'), ('playback_path', 'str')])

        # number this playback one past the most recent record for the trial
        last_torque_record = torque_model.get_by_cols(
            {
                'demo_name': demo_name,
                'trial_num': trial_num,
                'supervisor': supervisor
            },
            direction=-1)
        if last_torque_record is None:
            playback_num = 1
        else:
            playback_num = last_torque_record['playback_num'] + 1

        playback_path = os.path.join(trial_path, 'playback_torques',
                                     str(playback_num))
        if not os.path.exists(playback_path):
            os.makedirs(playback_path)

        data_torque_left.save_data(playback_path)
        data_torque_right.save_data(playback_path)

        torque_model.insert({
            'supervisor': supervisor,
            'demo_name': demo_name,
            'trial_num': trial_num,
            'playback_num': playback_num,
            'playback_path': playback_path
        })

        # archive the config used for this playback next to the torque data
        basename = os.path.basename(args.config_path)
        target_file_path = os.path.join(playback_path, basename)
        shutil.copyfile(args.config_path, target_file_path)

        ysub.stop()
        syncer.stop()

    # perform takedown motions
    logging.info("Taking down..")
    y.set_v(cfg['v'])
    y.set_z(cfg['z'])
    demo_obj.takedown()

    y.reset_home()
    y.open_grippers()

    y.stop()
コード例 #5
0
def register_ensenso(config):
    """Register an Ensenso depth sensor to the world frame.

    Detects a calibration pattern through the Ensenso ROS services,
    estimates the camera pose from a known pattern-to-world transform,
    saves the resulting transform and camera intrinsics to disk, and
    optionally moves a YuMi gripper to the pattern center so a human can
    verify the registration.

    Parameters
    ----------
    config : dict-like
        Must provide 'num_patterns', 'max_tries', 'pattern_tf',
        'sensor_frame', 'calib_dir', 'use_robot' and, when the robot is
        used, 'gripper_offset_x', 'gripper_offset_y', 'gripper_offset_z'.

    Raises
    ------
    ValueError
        If the calibration pattern is not detected within 'max_tries'
        attempts.
    rospy.ServiceException
        If a ROS service call to the sensor fails.
    """
    # set parameters for the Ensenso pattern-collection services
    average = True
    add_to_buffer = True
    clear_buffer = False
    decode = True
    grid_spacing = -1  # negative lets the sensor auto-detect grid spacing

    # read parameters
    num_patterns = config['num_patterns']
    max_tries = config['max_tries']

    # load tf pattern to world (known from the physical calibration setup)
    T_pattern_world = RigidTransform.load(config['pattern_tf'])

    # initialize sensor
    sensor_frame = config['sensor_frame']
    sensor = EnsensoSensor(sensor_frame)

    # initialize node and wait for the pattern services to come up
    rospy.init_node('register_ensenso', anonymous=True)
    rospy.wait_for_service('/%s/collect_pattern' %(sensor_frame))
    rospy.wait_for_service('/%s/estimate_pattern_pose' %(sensor_frame))

    # start sensor
    print('Starting sensor')
    sensor.start()
    time.sleep(1)
    print('Sensor initialized')

    # perform registration
    try:
        print('Collecting patterns')
        num_detected = 0
        i = 0
        while num_detected < num_patterns and i < max_tries:
            collect_pattern = rospy.ServiceProxy('/%s/collect_pattern' %(sensor_frame), CollectPattern)
            resp = collect_pattern(add_to_buffer,
                                   clear_buffer,
                                   decode,
                                   grid_spacing)
            if resp.success:
                print('Detected pattern %d' %(num_detected))
                num_detected += 1
            i += 1

        # Check the actual success criterion: enough patterns detected.
        # (Testing i == max_tries would falsely fail when the final
        # attempt succeeds exactly at the try limit.)
        if num_detected < num_patterns:
            raise ValueError('Failed to detect calibration pattern!')

        print('Estimating pattern pose')
        estimate_pattern_pose = rospy.ServiceProxy('/%s/estimate_pattern_pose' %(sensor_frame), EstimatePatternPose)
        resp = estimate_pattern_pose(average)

        # pattern pose in the camera frame (quaternion in w, x, y, z order)
        q_pattern_camera = np.array([resp.pose.orientation.w,
                                     resp.pose.orientation.x,
                                     resp.pose.orientation.y,
                                     resp.pose.orientation.z])
        t_pattern_camera = np.array([resp.pose.position.x,
                                     resp.pose.position.y,
                                     resp.pose.position.z])
        T_pattern_camera = RigidTransform(rotation=q_pattern_camera,
                                          translation=t_pattern_camera,
                                          from_frame='pattern',
                                          to_frame=sensor_frame)
    except rospy.ServiceException as e:
        print('Service call failed: %s' %(str(e)))
        # Without a pattern pose there is nothing to register; stop the
        # sensor and re-raise instead of falling through to a NameError
        # on the undefined T_pattern_camera.
        sensor.stop()
        raise

    # compute final transformation: camera pose in the world frame
    T_camera_world = T_pattern_world * T_pattern_camera.inverse()

    # save final transformation
    output_dir = os.path.join(config['calib_dir'], sensor_frame)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    pose_filename = os.path.join(output_dir, '%s_to_world.tf' %(sensor_frame))
    T_camera_world.save(pose_filename)
    intr_filename = os.path.join(output_dir, '%s.intr' %(sensor_frame))
    sensor.ir_intrinsics.save(intr_filename)

    # log final transformation
    print('Final Result for sensor %s' %(sensor_frame))
    print('Rotation: ')
    print(T_camera_world.rotation)
    print('Quaternion: ')
    print(T_camera_world.quaternion)
    print('Translation: ')
    print(T_camera_world.translation)

    # stop sensor
    sensor.stop()

    # move robot to calib pattern center
    if config['use_robot']:
        # create robot pose relative to target point
        target_pt_camera = Point(T_pattern_camera.translation, sensor_frame)
        target_pt_world = T_camera_world * target_pt_camera
        # gripper pointing straight down (180-degree rotation about x)
        R_gripper_world = np.array([[1.0, 0, 0],
                                    [0, -1.0, 0],
                                    [0, 0, -1.0]])
        t_gripper_world = np.array([target_pt_world.x + config['gripper_offset_x'],
                                    target_pt_world.y + config['gripper_offset_y'],
                                    target_pt_world.z + config['gripper_offset_z']])
        T_gripper_world = RigidTransform(rotation=R_gripper_world,
                                         translation=t_gripper_world,
                                         from_frame='gripper',
                                         to_frame='world')

        # move robot to pose
        y = YuMiRobot(tcp=YMC.TCP_SUCTION_STIFF)
        y.reset_home()
        time.sleep(1)

        # approach 5 cm above the target so the gripper descends vertically
        T_lift = RigidTransform(translation=(0,0,0.05), from_frame='world', to_frame='world')
        T_gripper_world_lift = T_lift * T_gripper_world

        y.right.goto_pose(T_gripper_world_lift)
        y.right.goto_pose(T_gripper_world)

        # wait for human measurement (raw_input: this script targets Python 2)
        yesno = raw_input('Take measurement. Hit [ENTER] when done')
        y.right.goto_pose(T_gripper_world_lift)
        y.reset_home()
        y.stop()