Example #1
def main(unused_argv):
    # SAY: Environment is a PyBullet gym-style environment class.
    # Initialize environment and task.
    env = Environment(
        FLAGS.assets_root,
        disp=FLAGS.disp,
        shared_memory=FLAGS.shared_memory,
        hz=480)
    task = tasks.names[FLAGS.task]()
    task.mode = FLAGS.mode

    # SAY: For each task, we have a scripted oracle that performs the task with ground truth information.
    # Initialize scripted oracle agent and dataset.
    agent = task.oracle(env)
    # SAY: this dataset follows the RL convention of storing experiences as episodes.
    dataset = Dataset(os.path.join(FLAGS.data_dir, f'{FLAGS.task}-{task.mode}'))

    # Train seeds are even and test seeds are odd.
    seed = dataset.max_seed
    if seed < 0:
        seed = -1 if (task.mode == 'test') else -2

    # SAY: the experience collection process resembles reinforcement learning.
    # Collect training data from oracle demonstrations.
    while dataset.n_episodes < FLAGS.n:
        print(f'Oracle demonstration: {dataset.n_episodes + 1}/{FLAGS.n}')
        episode, total_reward = [], 0
        seed += 2
        # SAY: set the random seed so the episode can be reproduced.
        np.random.seed(seed)
        env.set_task(task)
        obs = env.reset()
        obs = env._get_obs(skip_fixed=True)
        info = None
        reward = 0
        for _ in range(task.max_steps):
            act = agent.act(obs, info)
            episode.append((obs, act, reward, info))
            obs, reward, done, info = env.step(act)
            obs = env._get_obs(skip_fixed=True)
            total_reward += reward
            print(f'Total Reward: {total_reward} Done: {done}')
            if done:
                break
        # SAY: append the final observation so it can serve as the goal.
        obs = env._get_obs(skip_fixed=True)
        episode.append((obs, None, reward, info))

        # SAY: some interesting episodes may be worth checking out.
        # Only save completed demonstrations.
        # TODO(andyzeng): add back deformable logic.
        if total_reward > 0.99:
            dataset.add(seed, episode)
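
A minimal replay sketch for the data saved above, assuming Dataset.load(episode_id) returns (episode, seed) as it does in Example #4 below; each step holds (obs, act, reward, info) and the trailing entry (act=None) carries the goal observation:

episode, seed = dataset.load(0)                # hypothetical episode id
for obs, act, reward, info in episode[:-1]:    # per-step transitions
    print(act, reward)
goal_obs, _, final_reward, _ = episode[-1]     # act is None on the goal entry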
Example #2
def main(unused_argv):
    # Initialize environment and task.
    env = Environment(
        FLAGS.assets_root,
        disp=FLAGS.disp,
        shared_memory=FLAGS.shared_memory,
        hz=480)
    task = tasks.names[FLAGS.task]()
    task.mode = FLAGS.mode

    # Initialize scripted oracle agent and dataset.
    agent = task.oracle(env)
    dataset = Dataset(os.path.join(FLAGS.data_dir, f'{FLAGS.task}-{task.mode}'))

    # Train seeds are even and test seeds are odd.
    seed = dataset.max_seed
    if seed < 0:
        seed = -1 if (task.mode == 'test') else -2

    count = 0

    # Collect training data from oracle demonstrations.
    while dataset.n_episodes < FLAGS.n:
        print(f'Oracle demonstration: {dataset.n_episodes + 1}/{FLAGS.n}')
        episode, total_reward = [], 0
        seed += 2
        np.random.seed(seed)
        env.set_task(task)
        obs = env.reset()
        info = None
        reward = 0
        count += 1
        for _ in range(task.max_steps):
            act = agent.act(obs, info)
            episode.append((obs, act, reward, info))
            obs, reward, done, info = env.step(act)
            total_reward += reward
            print(f'Total Reward: {total_reward} Done: {done} Episode: {dataset.n_episodes}')
            if done:
                break
        episode.append((obs, None, reward, info))

        # Only save completed demonstrations.
        # TODO(andyzeng): add back deformable logic.
        if total_reward > 0.99:
            dataset.add(seed, episode)
            # print(dataset.shape())
    if dataset.n_episodes == FLAGS.n:
        pybullet.disconnect()
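
The even/odd seed convention above keeps train and test episodes disjoint. A minimal sketch of that invariant (next_seed is a hypothetical helper, not part of the codebase):

def next_seed(max_seed, mode):
    """Train seeds stay even, test seeds stay odd."""
    if max_seed < 0:                     # empty dataset: pick the base seed
        max_seed = -1 if mode == 'test' else -2
    return max_seed + 2                  # +2 preserves the parity

assert next_seed(-1, 'train') % 2 == 0   # first train seed is 0
assert next_seed(-1, 'test') % 2 == 1    # first test seed is 1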
Example #3
def main(unused_argv):
    vrb = ViveRobotBridge()

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0
    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(
        assets_root,
        disp=True,
        hz=60)

    task = tasks.names[task_name]()
    task.mode = mode
    agent = task.oracle(env)
    env.set_task(task)
    obs = env.reset()
    info = None
    ee_pose = ((0.46562498807907104, -0.375, 0.3599780201911926), (0.0, 0.0, 0.0, 1.0))
    ee_position = [0.0, 0.0, 0.0]

    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    while not rospy.is_shutdown():
        # p.stepSimulation()
        # p.getCameraImage(480, 320)

        if vrb.trigger_pressed_event == 1:
            # get current pose of the end-effector
            ee_position_ref = list(p.getLinkState(env.ur5, env.ee_tip)[4])
            #ee_orientation = p.getLinkState(self.ur5, self.ee_tip)[5]

            vrb.trigger_pressed_event = 0
            print("I--trigger pressed!\n")

        if vrb.trigger_released_event == 1:
            vrb.trigger_released_event = 0
            print("O--trigger released!\n") 

        if vrb.trigger_current_status == 1:
            vive_rotation = vrb.vive_controller_rotation
            ee_rotation = kdl.Rotation.Quaternion(
                vive_rotation[0], vive_rotation[1],
                vive_rotation[2], vive_rotation[3])
            r1 = kdl.Rotation.RotZ(-math.pi / 2)
            ee_rotation = r1 * ee_rotation
            ee_rotation.DoRotX(math.pi / 2)
            ee_rotation.DoRotZ(math.pi / 2)
            ee_rotation.DoRotY(math.pi)

            ee_orientation = ee_rotation.GetQuaternion()
            
            # br.sendTransform(
            #     (0.0, 0.0, 1.0),
            #     ee_rotation.GetQuaternion(),
            #     rospy.Time.now(),
            #     "transformed_controller_frame",
            #     "world"
            # )
            
            #ee_orientation = (0,0,0,1)
            ee_position[0] = ee_position_ref[0] + vrb.vive_controller_translation[1]
            ee_position[1] = ee_position_ref[1] - vrb.vive_controller_translation[0]
            # z axis limit
            z_control = ee_position_ref[2] + vrb.vive_controller_translation[2]
            if z_control < 0.02:
                z_control = 0.02
            ee_position[2] = z_control

            ee_pose = (tuple(ee_position), tuple(ee_orientation))
            #env.movep(ee_pose)
            #rci.publish_pose_message(ee_pose)
            joint_position = rci.solve_ik(ee_pose)
            rci.movej_fast(joint_position)
            rci.publish_joint_states_msg()
            #targj = env.solve_ik(ee_pose)
            #movej(env,targj, speed=0.01)
            #rci.movej(env,rci.joint_states)
            # joint_state_msg = JointState()
            # joint_state_msg.header.stamp = rospy.Time().now()
            # joint_state_msg.name = ['shoulder_pan_joint',
            #                         'shoulder_lift_joint',
            #                         'elbow_joint',
            #                         'wrist_1_joint',
            #                         'wrist_2_joint',
            #                         'wrist_3_joint']
            # joint_state_msg.position =  targj      
            # joint_state_pub.publish(joint_state_msg)         
            # add a marker to indicate the projection of the ee on the workspace
            
            marker_head_point = [ee_position[0], ee_position[1], 0.05]
            marker_tail_point = [ee_position[0], ee_position[1], 0.06]
            p.addUserDebugLine( marker_head_point, 
                                marker_tail_point, 
                                lineColorRGB=[1, 0, 0], 
                                lifeTime=0.2, 
                                lineWidth=3)
            if env.ee.check_grasp():
                print("grasp succeeded!")
                env.reset()
                ee_position_ref = list(p.getLinkState(env.ur5, env.ee_tip)[4])

        if vrb.grasp == 1:
            env.ee.activate()
        else:
            env.ee.release()

        p.stepSimulation()
        rate.sleep()
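
The loop above remaps controller axes into the workspace (x from controller +y, y from controller -x) and floors z at the table; the same mapping as a hypothetical helper, under those assumptions:

def controller_delta_to_ee(ref, delta, z_min=0.02):
    x = ref[0] + delta[1]              # workspace x <- controller +y
    y = ref[1] - delta[0]              # workspace y <- controller -x
    z = max(ref[2] + delta[2], z_min)  # keep the tool above the table
    return (x, y, z)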
Example #4
def main(unused_argv):
    # Configure which GPU to use.
    cfg = tf.config.experimental
    gpus = cfg.list_physical_devices('GPU')
    if not gpus:
        print('No GPUs detected. Running with CPU.')
    else:
        cfg.set_visible_devices(gpus[FLAGS.gpu], 'GPU')

    # Configure how much GPU to use (in Gigabytes).
    if FLAGS.gpu_limit is not None:
        mem_limit = 1024 * FLAGS.gpu_limit
        dev_cfg = [cfg.VirtualDeviceConfiguration(memory_limit=mem_limit)]
        cfg.set_virtual_device_configuration(gpus[0], dev_cfg)

    # Initialize environment and task.
    env = Environment(FLAGS.assets_root,
                      disp=FLAGS.disp,
                      shared_memory=FLAGS.shared_memory,
                      hz=480)
    task = tasks.names[FLAGS.task]()
    task.mode = 'test'

    # Load test dataset.
    ds = dataset.Dataset(os.path.join(FLAGS.data_dir, f'{FLAGS.task}-test'))

    # Run testing for each training run.
    for train_run in range(FLAGS.n_runs):
        name = f'{FLAGS.task}-{FLAGS.agent}-{FLAGS.n_demos}-{train_run}'

        # Initialize agent.
        np.random.seed(train_run)
        tf.random.set_seed(train_run)
        agent = agents.names[FLAGS.agent](name, FLAGS.task, FLAGS.root_dir)

        # # Run testing every interval.
        # for train_step in range(0, FLAGS.n_steps + 1, FLAGS.interval):

        # Load trained agent.
        if FLAGS.n_steps > 0:
            agent.load(FLAGS.n_steps)

        # Sample n_demos random episode indices from the test set (without replacement).
        episode_idx = np.random.choice(range(ds.n_episodes), FLAGS.n_demos,
                                       False)

        # Run testing and save total rewards with last transition info.
        results = []
        # for i in range(ds.n_episodes):
        for i in range(FLAGS.n_demos):
            print(f'Test: episode {episode_idx[i]} - {i + 1}/{FLAGS.n_demos}')
            episode, seed = ds.load(episode_idx[i])
            goal = episode[-1]
            total_reward = 0
            np.random.seed(seed)
            env.seed(seed)
            env.set_task(task)
            obs = env.reset()
            info = None
            reward = 0
            for _ in range(task.max_steps):
                act = agent.act(obs, info, goal)
                obs, reward, done, info = env.step(act)
                total_reward += reward
                print(f'Total Reward: {total_reward} Done: {done}')
                if done:
                    break
            results.append((total_reward, info))

            # Save results.
            with tf.io.gfile.GFile(
                    os.path.join(FLAGS.root_dir,
                                 f'{name}-{FLAGS.n_steps}.pkl'), 'wb') as f:
                pickle.dump(results, f)
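
Each run writes its results list to f'{name}-{FLAGS.n_steps}.pkl'. A hedged post-processing sketch (summarize is hypothetical) that reads one file back and reports the mean total reward:

import os
import pickle

def summarize(root_dir, name, n_steps):
    path = os.path.join(root_dir, f'{name}-{n_steps}.pkl')
    with open(path, 'rb') as results_file:
        results = pickle.load(results_file)  # list of (total_reward, info)
    rewards = [total_reward for total_reward, _ in results]
    return sum(rewards) / len(rewards)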
def main():
    rospy.init_node('raven_vive_teleop')
    rate = rospy.Rate(60.0)
    train_dataset = Dataset(os.path.join(dataset_root,
                                         'ravens_demo-1623923819265029807'),
                            use_aux=True)
    max_demos = train_dataset.n_episodes

    episodes = np.random.choice(range(max_demos), max_demos, False)
    print(episodes)
    train_dataset.set(episodes)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()
    info = None

    seed = 0

    agent = bc_softmax_aux.BC_Agent()
    #agent = bc_softmax2d_aux.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir +
                                "pretrain_model_4400_epoch")
    agent.act(obs)

    episode_steps = 0
    n_episode = 1
    aux = {
        'ee_pose': ((0, 0, 0), (0, 0, 0, 1)),
        'obj_pose': ((0, 0, 0), (0, 0, 0, 1))
    }

    plt.ion()
    im_color = obs['color'][0]
    img_draw = plt.imshow(im_color, interpolation='nearest', cmap='gray')

    while not rospy.is_shutdown():
        (obs, act, _, _, aux_dataset), _ = train_dataset.sample()

        im_color = obs['color'][0]
        img_draw.set_data(im_color)
        plt.pause(0.00000001)

        action, predicted_aux = agent.act(obs, ret_aux=True)
        #if action != None:
        #    print(action)
        #obj_pose = predicted_aux['predicted_obj_pose']
        obj_pose = predicted_aux['predicted_ee_position']
        #obj_pose = aux['ee_pose']
        marker_head_point = [obj_pose[0], obj_pose[1], obj_pose[2]]
        marker_tail_point = [obj_pose[0], obj_pose[1], obj_pose[2] + 0.01]
        p.addUserDebugLine(marker_head_point,
                           marker_tail_point,
                           lineColorRGB=[0, 1, 0],
                           lifeTime=3,
                           lineWidth=5)

        obs, reward, done, info, aux = env.step_simple(action,
                                                       use_aux=True,
                                                       stand_still=True)
        #obs, reward, done, info, aux = env.step_simple(action,use_aux = True)
        plt.pause(3)
        print(aux_dataset)
        print(predicted_aux)
        auxg = np.array(aux_dataset['ee_pose'][0], dtype=float)
        auxp = np.array(predicted_aux['predicted_ee_position'], dtype=float)
        d = np.sum(np.square(auxg - auxp)) / 3
        print('mse:%f\n' % d)

        # im_depth = obs['depth'][0]
        # img_draw.set_data(im_depth)
        # plt.pause(0.00000001)
        # plt.show()
        # print( im_color.shape)

        # print(obs['color'])
        s = "Episode:%d, steps:%d" % (n_episode, episode_steps)
        print(s)
        if done:
            n_episode += 1
            episode_steps = 0

            seed += 1

            episode = []

        else:
            episode_steps += 1

        if episode_steps > 100:
            episode = []
            episode_steps = 0
            n_episode += 1

            obs = env.reset()

        rate.sleep()
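
The error printed above is a mean squared error over the three position components; the same computation as a small hypothetical helper:

import numpy as np

def pose_mse(ground_truth, predicted):
    g = np.asarray(ground_truth, dtype=float)
    e = np.asarray(predicted, dtype=float)
    return float(np.mean(np.square(g - e)))  # equals np.sum(...) / 3 for 3-vectors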
Example #6
def main(unused_argv):

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0
    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(
        assets_root,
        disp=True,
        hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()
    info = None
    #agent = teleop_agent(env)
    dataset = Dataset(os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []
    reward = 0
    done = False

    plt.ion()
    im_color = obs['color'][0]
    im_depth = obs['depth'][0]
    img_draw = plt.imshow(im_depth, interpolation='nearest')

    agent = train_bc.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir + "model_500_epoch")
    agent.act(obs)
    f = open(model_save_rootdir + "rollout_log.txt", "w+")
    episode_steps = 0
    n_episode = 1
    while not rospy.is_shutdown():
        # p.stepSimulation()
        # p.getCameraImage(480, 320)
        keys = p.getKeyboardEvents()
        if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
            print("reset env")
            episode = []
            episode_steps = 0
            obs = env.reset()
        
        action = agent.act(obs)
        #if action != None:
        #    print(action)
        # record the transition before stepping, as in the oracle scripts above;
        # otherwise dataset.add below saves an empty episode
        episode.append((obs, action, reward, info))

        obs, reward, done, info = env.step_simple(action)
        im_depth = obs['depth'][0]
        img_draw.set_data(im_depth)
        plt.pause(0.00000001)
        plt.show()
        # print( im_color.shape)

        # print(obs['color'])
        s = "Episode:%d, steps:%d"%(n_episode, episode_steps)
        print(s)
        if(done):
            n_episode += 1
            episode_steps = 0
            s += ", succeed!\n" 
            f.write(s)
            f.flush()
            seed += 1
            dataset.add(seed, episode)
            episode = []

        else:
            episode_steps += 1

        if episode_steps > 100:
            episode = []
            episode_steps = 0
            n_episode += 1

            obs = env.reset()
            s += ", failed!\n" 
            f.write(s)
            f.flush()
        


        

        
        rate.sleep()
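
rollout_log.txt accumulates one ", succeeded!" or ", failed!" line per finished episode; a hedged sketch (success_rate is hypothetical) that tallies the success rate from it:

def success_rate(log_path):
    succeeded = failed = 0
    with open(log_path) as log:
        for line in log:
            if 'succeed' in line:
                succeeded += 1
            elif 'failed' in line:
                failed += 1
    total = succeeded + failed
    return succeeded / total if total else 0.0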
Example #7
def main(unused_argv):
    vrb = ViveRobotBridge()
    #    vrb.__init__()

    rospy.init_node('vive_listener')

    tfBuffer = tf2_ros.Buffer()
    listener = tf2_ros.TransformListener(tfBuffer)

    #    pub = rospy.Publisher('tarpos_pub', geometry_msgs.msg.Transform, queue_size=1)
    #    _joy_sub = rospy.Subscriber('/vive/controller_LHR_FFFF3F47/joy', Joy, vive_controller_button_callback, queue_size=1)

    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]

    rate = rospy.Rate(100.0)

    limits = 0.02

    env = Environment(FLAGS.assets_root,
                      disp=FLAGS.disp,
                      shared_memory=FLAGS.shared_memory,
                      hz=480)
    task = tasks.names[FLAGS.task]()
    task.mode = FLAGS.mode
    agent = task.oracle(env)
    env.set_task(task)
    obs = env.reset()
    info = None
    ee_pose = ((0.46562498807907104, -0.375, 0.3599780201911926), (0.0, 0.0,
                                                                   0.0, 1.0))
    while not rospy.is_shutdown():
        #act = agent.act(obs, info)

        #obs, reward, done, info = env.step(act)

        env.movep(ee_pose)
        if vrb.grasp == 1:
            env.ee.activate()
        else:
            env.ee.release()

        try:
            trans = tfBuffer.lookup_transform('world',
                                              'controller_LHR_FF777F05',
                                              rospy.Time())
#            rospy.loginfo(trans)
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException,
                tf2_ros.ExtrapolationException):
            rospy.loginfo("error")
            rate.sleep()
            continue

        if vrb.offset_flag == 1:
            if temp_flag == 0:
                #                vrb.offset[0] = trans.transform.translation.x
                #                vrb.offset[1] = trans.transform.translation.y
                #                vrb.offset[2] = trans.transform.translation.z
                #                print(vrb.offset)
                pre_position[0] = trans.transform.translation.x
                pre_position[1] = trans.transform.translation.y
                pre_position[2] = trans.transform.translation.z

                pre_pose[0] = trans.transform.rotation.x
                pre_pose[1] = trans.transform.rotation.y
                pre_pose[2] = trans.transform.rotation.z
                pre_pose[3] = trans.transform.rotation.w
            else:
                msg = geometry_msgs.msg.Transform()
                #                msg.translation.x = (trans.transform.translation.x-pre_position[0])/10
                #                msg.translation.y = (trans.transform.translation.y-pre_position[1])/10
                #                msg.translation.z = (trans.transform.translation.z-pre_position[2])/10

                # compute delta distance
                msg.translation.x = trans.transform.translation.x - pre_position[0]
                msg.translation.y = trans.transform.translation.y - pre_position[1]
                msg.translation.z = trans.transform.translation.z - pre_position[2]

                if msg.translation.x > limits:
                    msg.translation.x = limits
                if msg.translation.y > limits:
                    msg.translation.y = limits
                if msg.translation.z > limits:
                    msg.translation.z = limits
                if msg.translation.x < -limits:
                    msg.translation.x = -limits
                if msg.translation.y < -limits:
                    msg.translation.y = -limits
                if msg.translation.z < -limits:
                    msg.translation.z = -limits

                print(msg.translation)

                #                temp[0] = trans.transform.rotation.x
                #                temp[1] = trans.transform.rotation.y
                #                temp[2] = trans.transform.rotation.z
                #                temp[3] = trans.transform.rotation.w
                #
                #
                #
                #                q = Quaternion(pre_pose) * Quaternion(temp).inverse
                #
                #
                #                msg.rotation.x = q.x
                #                msg.rotation.y = q.y
                #                msg.rotation.z = q.z
                #                msg.rotation.w = q.w#

                msg.rotation.x = trans.transform.rotation.x
                msg.rotation.y = trans.transform.rotation.y
                msg.rotation.z = trans.transform.rotation.z
                msg.rotation.w = trans.transform.rotation.w

                ee_position = list(p.getLinkState(env.ur5, env.ee_tip)[4])
                #ee_orientation = p.getLinkState(self.ur5, self.ee_tip)[5]
                ee_orientation = (0, 0, 0, 1)
                ee_position[0] = ee_position[0] + msg.translation.y
                ee_position[1] = ee_position[1] - msg.translation.x
                # z axis limit
                z_control = ee_position[2] + msg.translation.z
                if z_control < 0.02:
                    z_control = 0.02
                ee_position[2] = z_control

                ee_pose = (tuple(ee_position), ee_orientation)

                # rectified quaternion

                #                theta_x = np.arcsin(2*(trans.transform.rotation.w*trans.transform.rotation.y
                #                                       - trans.transform.rotation.z*trans.transform.rotation.x))
                #                temp = (trans.transform.rotation.x**2 + trans.transform.rotation.z**2)**0.5
                #                z_v = trans.transform.rotation.z / temp
                #                x_v = trans.transform.rotation.x / temp
                #                msg.rotation.x = x_v * np.sin(theta_x)#0#np.sin(0.5*theta_x)
                #                msg.rotation.y = 0#y_v * np.sin(theta_x)#0
                #                msg.rotation.z = z_v * np.sin(theta_x)
                #                msg.rotation.w = trans.transform.rotation.w
                #
                #                msg.translation.x = 0
                #                msg.translation.y = 0
                #                msg.translation.z = 0
                #
                #                msg.rotation.x = 0
                #                msg.rotation.y = 0
                #                msg.rotation.z = 0
                #                msg.rotation.w = 1

                print(msg.rotation)

                vrb.pub.publish(msg)

                pre_position[0] = trans.transform.translation.x
                pre_position[1] = trans.transform.translation.y
                pre_position[2] = trans.transform.translation.z

        temp_flag = vrb.offset_flag
        rate.sleep()
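
The six if-statements above implement a symmetric per-axis clamp; an equivalent hypothetical helper:

def clamp(value, limit):
    return max(-limit, min(limit, value))

# e.g. msg.translation.x = clamp(trans.transform.translation.x - pre_position[0], limits)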
def main(unused_argv):

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()
    info = None
    #agent = teleop_agent(env)
    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []
    reward = 0
    done = False

    plt.ion()
    im_color = obs['color'][0]
    im_depth = obs['depth'][0]
    img_draw = plt.imshow(im_depth, interpolation='nearest')

    agent = bc_softmax_aux.BC_Agent()
    #agent = bc_softmax2d_aux.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir + "model_8200_epoch")
    agent.act(obs)
    f = open(model_save_rootdir + "rollout_log.txt", "w+")
    episode_steps = 0
    n_episode = 1
    aux = {
        'ee_pose': ((0, 0, 0), (0, 0, 0, 1)),
        'obj_pose': ((0, 0, 0), (0, 0, 0, 1))
    }
    while not rospy.is_shutdown():

        #p.stepSimulation()
        #p.getCameraImage(480, 320)
        keys = p.getKeyboardEvents()
        if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
            print("reset env")
            episode = []
            episode_steps = 0
            n_episode += 1

            obs = env.reset()

        action, predicted_aux = agent.act(obs, ret_aux=True)
        #if action != None:
        #    print(action)
        # record the transition before stepping, as in the oracle scripts above;
        # otherwise dataset.add below saves an empty episode
        episode.append((obs, action, reward, info))
        obj_pose = predicted_aux['predicted_obj_pose']
        #obj_pose = predicted_aux['predicted_ee_position']
        #obj_pose = aux['ee_pose']
        marker_head_point = [obj_pose[0], obj_pose[1], obj_pose[2]]
        marker_tail_point = [obj_pose[0], obj_pose[1], obj_pose[2] + 0.01]
        p.addUserDebugLine(marker_head_point,
                           marker_tail_point,
                           lineColorRGB=[0, 1, 0],
                           lifeTime=0.2,
                           lineWidth=5)

        #obs, reward, done, info, aux = env.step_simple(action,use_aux = True, stand_still = True)
        obs, reward, done, info, aux = env.step_simple(action, use_aux=True)
        print(aux)
        print(predicted_aux)
        # im_depth = obs['depth'][0]
        # img_draw.set_data(im_depth)
        # plt.pause(0.00000001)
        # plt.show()
        # print( im_color.shape)

        # print(obs['color'])
        s = "Episode:%d, steps:%d" % (n_episode, episode_steps)
        print(s)
        if done:
            n_episode += 1
            episode_steps = 0
            s += ", succeeded!\n"
            f.write(s)
            f.flush()
            seed += 1
            dataset.add(seed, episode)
            episode = []

        else:
            episode_steps += 1

        if episode_steps > 100:
            episode = []
            episode_steps = 0
            n_episode += 1

            obs = env.reset()
            s += ", failed!\n"
            f.write(s)
            f.flush()

        rate.sleep()
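
The keyboard-reset check recurs in these rollout loops; a small hypothetical helper built on the same PyBullet calls (p.getKeyboardEvents, p.KEY_WAS_RELEASED):

def key_released(key_char):
    keys = p.getKeyboardEvents()
    code = ord(key_char)
    return code in keys and bool(keys[code] & p.KEY_WAS_RELEASED)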
Example #9
def main(unused_argv):

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()
    info = None
    #agent = teleop_agent(env)
    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []
    reward = 0
    done = False

    plt.ion()
    im_color = obs['color'][0]
    im_depth = obs['depth'][0]
    img_draw = plt.imshow(im_depth, interpolation='nearest')

    agent = random_action_agent(env)

    f = open(model_save_rootdir + "rollout_log.txt", "w+")
    episode_steps = 0
    n_episode = 1
    while not rospy.is_shutdown():

        #p.stepSimulation()
        #p.getCameraImage(480, 320)
        keys = p.getKeyboardEvents()
        if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
            print("reset env")
            episode = []

            obs = env.reset()

        action = agent.act()
        #if action != None:
        #    print(action)
        # repeat the same action for 30 simple steps
        for _ in range(30):
            obs, reward, done, info, aux = env.step_simple(action,
                                                           use_aux=True)
        state = get_state_ground_truth(env)
        print(state)
        print(aux)
        env.reset()

        rate.sleep()
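
The fixed 30-step loop above can be factored into a hypothetical helper; step_simple's five-tuple return follows the call in the loop:

def step_n(env, action, n=30):
    # repeat one action for n simple steps and keep the last readouts
    for _ in range(n):
        obs, reward, done, info, aux = env.step_simple(action, use_aux=True)
    return obs, aux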
Example #10
def main(unused_argv):
    vrb = ViveRobotBridge()
    #    vrb.__init__()

    rospy.init_node('raven_vive_teleop')

    temp_flag = 0

    pre_position = [0, 0, 0]
    pre_pose = [1, 0, 0, 0]
    temp = [1, 0, 0, 0]

    rate = rospy.Rate(60.0)

    limits = 0.02

    env = Environment(assets_root, disp=True, hz=60)
    task = tasks.names[task_name]()
    task.mode = mode
    agent = task.oracle(env)
    env.set_task(task)
    obs = env.reset()
    info = None
    ee_pose = ((0.46562498807907104, -0.375, 0.3599780201911926), (0.0, 0.0,
                                                                   0.0, 1.0))
    ee_position = [0.0, 0.0, 0.0]

    while not rospy.is_shutdown():
        """ print(vrb.vive_controller_translation)
        print(vrb.offset_flag)
        print(vrb.grasp)
        continue """
        #p.stepSimulation()
        #p.getCameraImage(480, 320)

        if vrb.trigger_pressed_event == 1:
            # get current pose of the end-effector
            ee_position_ref = list(p.getLinkState(env.ur5, env.ee_tip)[4])
            ee_orientation = p.getLinkState(env.ur5, env.ee_tip)[5]

            vrb.trigger_pressed_event = 0
#            print("I--trigger pressed!\n")

        if vrb.trigger_released_event == 1:
            vrb.trigger_released_event = 0
#            print("O--trigger released!\n")

        if vrb.trigger_current_status == 1:
            ee_orientation = vrb.vive_controller_rotation
            #            ee_orientation = (0,0,0,1)
            ee_position[0] = ee_position_ref[0] + vrb.vive_controller_translation[0]
            ee_position[1] = ee_position_ref[1] + vrb.vive_controller_translation[1]
            # z axis limit (the other examples clamp at z >= 0.02)
            z_control = ee_position_ref[2] + vrb.vive_controller_translation[2]
            #            print("ee position", vrb.vive_controller_translation[1],
            #                  - vrb.vive_controller_translation[0], vrb.vive_controller_translation[2])
            if z_control < 0.02:
                z_control = 0.02
            ee_position[2] = z_control

            ee_pose = (tuple(ee_position), ee_orientation)
            env.movep(ee_pose)
            marker_head_point = [ee_position[0], ee_position[1], 0.05]
            marker_tail_point = [ee_position[0], ee_position[1], 0.06]
            p.addUserDebugLine(marker_head_point,
                               marker_tail_point,
                               lineColorRGB=[1, 0, 0],
                               lifeTime=0.2,
                               lineWidth=3)

#        if vrb.grasp == 1:
#            env.ee.activate()
#        else:
#            env.ee.release()
#

        p.stepSimulation()
        rate.sleep()
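
The short red debug segment drawn above the gripper appears in several of these loops; a reusable hypothetical wrapper around the same p.addUserDebugLine call:

def draw_ee_marker(position, color=(1, 0, 0), life=0.2):
    head = [position[0], position[1], 0.05]
    tail = [position[0], position[1], 0.06]
    p.addUserDebugLine(head, tail,
                       lineColorRGB=list(color),
                       lifeTime=life,
                       lineWidth=3)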