def main():
    """Replay recorded demo episodes and compare the BC agent's auxiliary
    end-effector prediction against the dataset's ground truth.

    Samples observations from a recorded demo dataset, runs the agent on
    them, draws the predicted end-effector position as a green debug
    marker in the PyBullet GUI, and prints the per-step MSE between the
    predicted and recorded end-effector positions.
    """
    rospy.init_node('raven_vive_teleop')
    rate = rospy.Rate(60.0)  # same loop rate as the gym environment

    train_dataset = Dataset(os.path.join(dataset_root,
                                         'ravens_demo-1623923819265029807'),
                            use_aux=True)
    max_demos = train_dataset.n_episodes

    # Visit every recorded episode exactly once, in random order.
    episodes = np.random.choice(range(max_demos), max_demos, False)
    print(episodes)
    train_dataset.set(episodes)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()

    agent = bc_softmax_aux.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir +
                                "pretrain_model_4400_epoch")
    agent.act(obs)  # warm-up call so later act() latency is steady

    episode_steps = 0
    n_episode = 1

    plt.ion()
    img_draw = plt.imshow(obs['color'][0], interpolation='nearest',
                          cmap='gray')

    while not rospy.is_shutdown():
        # Ground-truth sample from the demo dataset; aux_dataset carries
        # the recorded poses for this step.
        (obs, act, _, _, aux_dataset), _ = train_dataset.sample()

        img_draw.set_data(obs['color'][0])
        plt.pause(0.00000001)  # tiny pause lets matplotlib redraw

        action, predicted_aux = agent.act(obs, ret_aux=True)
        obj_pose = predicted_aux['predicted_ee_position']
        # Short vertical green segment marking the predicted position.
        marker_head_point = [obj_pose[0], obj_pose[1], obj_pose[2]]
        marker_tail_point = [obj_pose[0], obj_pose[1], obj_pose[2] + 0.01]
        p.addUserDebugLine(marker_head_point,
                           marker_tail_point,
                           lineColorRGB=[0, 1, 0],
                           lifeTime=3,
                           lineWidth=5)

        # stand_still=True: advance the sim without executing the action,
        # so the marker can be inspected against the dataset frame.
        obs, reward, done, info, aux = env.step_simple(action,
                                                       use_aux=True,
                                                       stand_still=True)
        plt.pause(3)
        print(aux_dataset)
        print(predicted_aux)

        # MSE between recorded and predicted end-effector position
        # (mean over the 3 coordinates; original wrote sum(...) / 3).
        auxg = np.array(aux_dataset['ee_pose'][0], dtype=float)
        auxp = np.array(predicted_aux['predicted_ee_position'], dtype=float)
        d = np.mean(np.square(auxg - auxp))
        print('mse:%f\n' % d)

        s = "Episode:%d, steps:%d" % (n_episode, episode_steps)
        print(s)
        if done:
            n_episode += 1
            episode_steps = 0
        else:
            episode_steps += 1

        # Time out an episode after 100 steps and start over.
        if episode_steps > 100:
            episode_steps = 0
            n_episode += 1

            obs = env.reset()

        rate.sleep()
def main(unused_argv):
    """Roll out the pretrained BC agent closed-loop, record episodes, and
    log succeed/fail outcomes to rollout_log.txt.

    Draws the agent's predicted object pose as a debug marker, adds each
    finished episode to a fresh rollout dataset, and resets on the 'r'
    key or after a 100-step timeout.

    Args:
        unused_argv: ignored (absl.app entry-point convention).
    """
    rospy.init_node('raven_vive_teleop')
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()

    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    # Constructed for their side effects (ROS TF broadcasting / robot
    # control hookup); the handles themselves are not used below.
    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []

    plt.ion()
    img_draw = plt.imshow(obs['depth'][0], interpolation='nearest')

    agent = bc_softmax_aux.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir + "model_8200_epoch")
    agent.act(obs)  # warm-up call

    episode_steps = 0
    n_episode = 1

    # Context manager closes the log file on any exit path (the original
    # handle was opened and never closed).
    with open(model_save_rootdir + "rollout_log.txt", "w+") as f:
        while not rospy.is_shutdown():
            # Manual reset on the 'r' key in the PyBullet window.
            keys = p.getKeyboardEvents()
            if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
                print("reset env")
                episode = []
                episode_steps = 0
                n_episode += 1

                obs = env.reset()

            action, predicted_aux = agent.act(obs, ret_aux=True)
            obj_pose = predicted_aux['predicted_obj_pose']
            # Short vertical green segment marking the predicted pose.
            marker_head_point = [obj_pose[0], obj_pose[1], obj_pose[2]]
            marker_tail_point = [obj_pose[0], obj_pose[1], obj_pose[2] + 0.01]
            p.addUserDebugLine(marker_head_point,
                               marker_tail_point,
                               lineColorRGB=[0, 1, 0],
                               lifeTime=0.2,
                               lineWidth=5)

            obs, reward, done, info, aux = env.step_simple(action,
                                                           use_aux=True)
            print(aux)
            print(predicted_aux)

            s = "Episode:%d, steps:%d" % (n_episode, episode_steps)
            print(s)
            if done:
                n_episode += 1
                episode_steps = 0
                s += ", succeed!\n"
                f.write(s)
                f.flush()
                seed += 1
                # NOTE(review): `episode` is never appended to in this
                # loop, so an empty episode is recorded — confirm intent.
                dataset.add(seed, episode)
                episode = []
            else:
                episode_steps += 1

            # Time out after 100 steps: count the episode as failed.
            if episode_steps > 100:
                episode = []
                episode_steps = 0
                n_episode += 1

                obs = env.reset()
                s += ", failed!\n"
                f.write(s)
                f.flush()

            rate.sleep()
# --- Example 3 ---
def main(unused_argv):
    """Roll out the BC agent (train_bc variant) closed-loop, preview the
    depth observation, and log succeed/fail outcomes to rollout_log.txt.

    Args:
        unused_argv: ignored (absl.app entry-point convention).
    """
    rospy.init_node('raven_vive_teleop')
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()

    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))
    seed = 0

    # Constructed for their side effects (ROS TF broadcasting / robot
    # control hookup); the handles themselves are not used below.
    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    episode = []

    plt.ion()
    img_draw = plt.imshow(obs['depth'][0], interpolation='nearest')

    agent = train_bc.BC_Agent()
    agent.load_pretrained_model(model_save_rootdir + "model_500_epoch")
    agent.act(obs)  # warm-up call

    episode_steps = 0
    n_episode = 1

    # Context manager closes the log file on any exit path (the original
    # handle was opened and never closed).
    with open(model_save_rootdir + "rollout_log.txt", "w+") as f:
        while not rospy.is_shutdown():
            # Manual reset on the 'r' key in the PyBullet window.
            keys = p.getKeyboardEvents()
            if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
                print("reset env")
                episode = []
                episode_steps = 0
                obs = env.reset()

            action = agent.act(obs)

            obs, reward, done, info = env.step_simple(action)

            # Live depth-image preview.
            img_draw.set_data(obs['depth'][0])
            plt.pause(0.00000001)
            plt.show()

            s = "Episode:%d, steps:%d" % (n_episode, episode_steps)
            print(s)
            if done:
                n_episode += 1
                episode_steps = 0
                s += ", succeed!\n"
                f.write(s)
                f.flush()
                seed += 1
                # NOTE(review): `episode` is never appended to in this
                # loop, so an empty episode is recorded — confirm intent.
                dataset.add(seed, episode)
                episode = []
            else:
                episode_steps += 1

            # Time out after 100 steps: count the episode as failed.
            if episode_steps > 100:
                episode = []
                episode_steps = 0
                n_episode += 1

                obs = env.reset()
                s += ", failed!\n"
                f.write(s)
                f.flush()

            rate.sleep()
# --- Example 4 ---
def main(unused_argv):
    """Drive the environment with random actions and print the resulting
    ground-truth state next to the environment's aux output.

    Each iteration samples one random action, applies it for 30 steps,
    prints the state and aux dict, then resets the environment.

    Args:
        unused_argv: ignored (absl.app entry-point convention).
    """
    rospy.init_node('raven_vive_teleop')
    # same loop rate as gym environment
    rate = rospy.Rate(60.0)

    env = Environment(assets_root, disp=True, hz=60)

    task = tasks.names[task_name]()
    task.mode = mode

    env.set_task(task)
    obs = env.reset()

    # Unused below, but constructing it may create the rollout directory;
    # kept for side-effect parity with the original.
    dataset = Dataset(
        os.path.join(dataset_root, f'ravens-bc-rollout-{time.time_ns()}'))

    # Constructed for their side effects (ROS TF broadcasting / robot
    # control hookup); the handles themselves are not used below.
    br = tf.TransformBroadcaster()
    rci = RobotControlInterface(env)

    plt.ion()
    img_draw = plt.imshow(obs['depth'][0], interpolation='nearest')

    agent = random_action_agent(env)

    # NOTE(review): the original opened rollout_log.txt in "w+" mode and
    # never wrote to or closed it. Preserve the truncation side effect
    # but close the handle — confirm the truncation is actually wanted.
    open(model_save_rootdir + "rollout_log.txt", "w+").close()

    while not rospy.is_shutdown():
        # Manual reset on the 'r' key in the PyBullet window.
        keys = p.getKeyboardEvents()
        if ord("r") in keys and keys[ord("r")] & p.KEY_WAS_RELEASED:
            print("reset env")
            obs = env.reset()

        action = agent.act()
        # Apply the same action for 30 consecutive sim steps (the
        # original counted down a manual while-loop).
        for _ in range(30):
            obs, reward, done, info, aux = env.step_simple(action,
                                                           use_aux=True)
        state = get_state_ground_truth(env)
        print(state)
        print(aux)
        env.reset()

        rate.sleep()