Example #1
def test_mujoco_controllers_absolute_step():
    print("starting evaluation for {} episodes".format(args.num_eval_episodes))
    # generate random seed

    eval_base_path = os.path.join(
        results_dir, '_absstep%s_tc%s' %
        (args.relative_step_size, args.eval_filename_modifier))
    eval_step_file_path = eval_base_path + '.cpkl'
    eval_env = suite.load(domain_name=domain,
                          task_name=task,
                          task_kwargs=task_kwargs,
                          environment_kwargs=environment_kwargs)
    print("CREATING REPLAY eval", cam_dim)
    eval_replay_buffer = ReplayBuffer(kwargs['state_dim'],
                                      kwargs['action_dim'],
                                      max_size=int(args.eval_replay_size),
                                      cam_dim=cam_dim,
                                      seed=seed)

    error_dict = {}
    for jt in range(0, 13):
        error_dict[jt] = 0
        done = False
        num_steps = 0
        reward = 0
        state_names_dict = get_state_names_dict()
        state_type, reward, discount, state = eval_env.reset()
        frame_compressed = get_next_frame(eval_env)
        obs_angles = deepcopy(state['observations'][3:13 + 3])
        total_errors = 0
        direction = 1
        # absolute-position actions start at the current joint angles
        action = deepcopy(obs_angles)

        while not done:
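            # sweep this joint back and forth, reversing direction before the
            # commanded angle would pass amins[jt] or amaxes[jt]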
            if obs_angles[jt] - (2 * args.relative_step_size) <= amins[jt]:
                direction = 1
                print("direction", direction, action[jt], amins[jt],
                      amaxes[jt])
            if obs_angles[jt] + (2 * args.relative_step_size) >= amaxes[jt]:
                direction = -1
                print("direction", direction, action[jt], amins[jt],
                      amaxes[jt])
            action[jt] = obs_angles[jt] + args.relative_step_size * direction
            print('JT{}N{}A'.format(jt, num_steps), action)
            reward = 0
            # Perform action
            step_type, _, discount, next_state = eval_env.step(action)
            last_obs_angles = deepcopy(state['observations'][3:13 + 3])
            obs_angles = deepcopy(next_state['observations'][3:13 + 3])
            print('JT{}N{}O'.format(jt, num_steps), obs_angles)

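            # absolute control: the action is the target joint angle itself,
            # so the tracking error is simply action - observed angles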
            error = action - obs_angles
            abs_error = np.abs(error)
            if np.max(abs_error) > .1:
                print("----ERROR", error)
                error_dict[jt] += 1
                reward = -1

            next_frame_compressed = get_next_frame(eval_env)
            done = step_type.last()
            # Store data in replay buffer

            eval_replay_buffer.add(state['observations'],
                                   action,
                                   reward,
                                   next_state['observations'],
                                   done,
                                   frame_compressed=frame_compressed,
                                   next_frame_compressed=next_frame_compressed)

            frame_compressed = next_frame_compressed
            state = next_state
            num_steps += 1

        print("JT TOTAL ERRORS", error_dict)
        last_steps = eval_replay_buffer.get_last_steps(num_steps)
        done = True

        emovie_path = eval_base_path + '_JT%02d_%s_%s.mp4' % (
            jt, args.eval_filename_modifier, args.camera_view)
        ebase = emovie_path.replace('.mp4', '')

        plotting.plot_states(last_steps, ebase, detail_dict=state_names_dict)
        plotting.plot_position_actions(last_steps,
                                       ebase,
                                       relative=args.relative_step)
        plotting.plot_frames(emovie_path,
                             eval_replay_buffer.get_last_steps(num_steps),
                             plot_action_frames=True,
                             min_action=-kwargs['max_action'],
                             max_action=kwargs['max_action'],
                             plot_frames=True)
        pickle.dump(eval_replay_buffer, open(ebase + '.epkl', 'wb'))
        plotting.plot_replay_reward(eval_replay_buffer,
                                    ebase,
                                    start_step=0,
                                    name_modifier='train')

    # write data files
    print("---------------------------------------")
    eval_replay_buffer.shrink_to_last_step()
    pickle.dump(eval_replay_buffer, open(eval_step_file_path, 'wb'))
    print("JT TOTAL ERRORS", error_dict)

    movie_path = eval_base_path + '_%s_%s.mp4' % (args.eval_filename_modifier,
                                                  args.camera_view)
    plotting.plot_frames(movie_path,
                         eval_replay_buffer.get_last_steps(
                             eval_replay_buffer.size),
                         plot_action_frames=True,
                         min_action=-kwargs['max_action'],
                         max_action=kwargs['max_action'],
                         plot_frames=False)
Example #2
def evaluate(load_model_filepath):
    print("starting evaluation for {} episodes".format(args.num_eval_episodes))
    policy, train_step, results_dir, loaded_modelpath = load_policy(load_model_filepath)
    eval_seed = args.seed+train_step
    task_kwargs['random'] = eval_seed
    load_model_base = loaded_modelpath.replace('.pt', '')
    plotting.plot_loss_dict(policy, load_model_base)
    state_names_dict = get_state_names_dict()
    train_replay_buffer = load_replay_buffer(load_model_base + '.pkl')

    eval_env = suite.load(domain_name=args.domain,
                          task_name=args.task,
                          task_kwargs=task_kwargs,
                          environment_kwargs=environment_kwargs)

    # generate random seed
    random_state = np.random.RandomState(eval_seed)
    train_dir = os.path.join(load_model_base + '_train%s'%args.eval_filename_modifier)
    if not os.path.exists(train_dir):
        os.makedirs(train_dir)
    train_base = os.path.join(train_dir, get_step_filename(train_step)+'_train')
    plotting.plot_replay_reward(train_replay_buffer, train_base, start_step=train_step, name_modifier='train')
    plotting.plot_states(train_replay_buffer.get_last_steps(train_replay_buffer.size),
                         train_base, detail_dict=state_names_dict)
 

    eval_dir = os.path.join(load_model_base + '_eval%s'%args.eval_filename_modifier)
    if not os.path.exists(eval_dir):
        os.makedirs(eval_dir)
    print('saving results to dir: {}'.format(eval_dir))
    eval_base = os.path.join(eval_dir, get_step_filename(train_step)+'_eval_S{:05d}'.format(eval_seed))

    eval_step_filepath = eval_base + '%s.epkl'%args.eval_filename_modifier
    if os.path.exists(eval_step_filepath) and not args.overwrite_replay:
        print('loading existing replay buffer:{}'.format(eval_step_filepath))
        eval_replay_buffer = load_replay_buffer(eval_step_filepath)
    else:

        eval_replay_buffer = ReplayBuffer(kwargs['state_dim'], kwargs['action_dim'],
                                          max_size=int(args.eval_replay_size),
                                          cam_dim=cam_dim, seed=eval_seed)
 

        for e in range(args.num_eval_episodes):
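            # roll out the loaded policy for one episode and store every
            # transition (with compressed camera frames) in the eval replay buffer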
            done = False
            num_steps = 0
            state_type, reward, discount, state = eval_env.reset()
            frame_compressed = get_next_frame(eval_env)
            # TODO: possible off-by-one in the replay_buffer step count
            while not done:
                action = (
                        policy.select_action(state['observations'])
                    ).clip(-kwargs['max_action'], kwargs['max_action'])
                # Perform action
                step_type, reward, discount, next_state = eval_env.step(action)
                next_frame_compressed = get_next_frame(eval_env)
                done = step_type.last()
                # Store data in replay buffer
                eval_replay_buffer.add(state['observations'], action, reward,
                                       next_state['observations'], done,
                                       frame_compressed=frame_compressed,
                                       next_frame_compressed=next_frame_compressed)

                frame_compressed = next_frame_compressed
                state = next_state
                num_steps+=1
                time.sleep(.1)
 
            # plot episode
            er = int(eval_replay_buffer.episode_rewards[-1])
            epath = eval_base + '_E{}_R{}'.format(e, er)
            exp = eval_replay_buffer.get_last_steps(num_steps)
            plotting.plot_states(exp, epath, detail_dict=state_names_dict)
            if args.domain == 'jaco':
                plotting.plot_position_actions(exp, epath, relative=True)
            if any([args.plot_movie, args.plot_action_movie, args.plot_frames]):
                emovie_path = epath + '_CAM{}.mp4'.format(args.camera_view)
                print('plotting episode: {}'.format(emovie_path))
                plotting.plot_frames(emovie_path,
                                     eval_replay_buffer.get_last_steps(num_steps),
                                     plot_action_frames=args.plot_action_movie,
                                     min_action=-kwargs['max_action'],
                                     max_action=kwargs['max_action'],
                                     plot_frames=args.plot_frames)

    eval_replay_buffer.shrink_to_last_step()
    pickle.dump(eval_replay_buffer, open(eval_step_filepath, 'wb'))
    # plot evaluation
    plotting.plot_replay_reward(eval_replay_buffer, eval_base,
                                start_step=train_step, name_modifier='eval')
    plotting.plot_states(eval_replay_buffer.get_last_steps(eval_replay_buffer.size),
                         eval_base, detail_dict=state_names_dict)

    if any([args.plot_movie, args.plot_action_movie, args.plot_frames]):
        movie_path = eval_base + '_CAM{}.mp4'.format(args.camera_view)
        plotting.plot_frames(movie_path,
                             eval_replay_buffer.get_last_steps(eval_replay_buffer.size),
                             plot_action_frames=args.plot_action_movie,
                             min_action=-kwargs['max_action'],
                             max_action=kwargs['max_action'],
                             plot_frames=args.plot_frames)
    return eval_replay_buffer, eval_step_filepath
Example #3
def test_mujoco_controllers_relative_step():
    print("starting evaluation for {} episodes".format(args.num_eval_episodes))
    # generate random seed

    eval_base_path = os.path.join(
        results_dir,
        '_relstep%s_tc%s' % (args.relative_step, args.eval_filename_modifier))
    eval_step_file_path = eval_base_path + '.cpkl'
    eval_env = suite.load(domain_name=domain,
                          task_name=task,
                          task_kwargs=task_kwargs,
                          environment_kwargs=environment_kwargs)
    print("CREATING REPLAY eval", cam_dim)
    eval_replay_buffer = ReplayBuffer(kwargs['state_dim'],
                                      kwargs['action_dim'],
                                      max_size=int(args.eval_replay_size),
                                      cam_dim=cam_dim,
                                      seed=seed)
    print(amins)
    print(amaxes)
    error_dict = {}
    for jt in range(0, 13):
        error_dict[jt] = 0
        done = False
        num_steps = 0
        reward = 0
        state_names_dict = get_state_names_dict()
        state_type, reward, discount, state = eval_env.reset()
        frame_compressed = get_next_frame(eval_env)
        obs_angles = deepcopy(state['observations'][3:13 + 3])
        total_errors = 0
        # WHAT I KNOW
        # - if I don't step, nothing changes
        # - when I set named.data.qpos to the home angles, the robot looks the way I want
        # - however, if from that home pose I "step" to "home" (the same angles), the result is offset by 45
        """

        dm_control's rl/control.py var CONTROL_TIMESTEP is not ideal for position controllers.
        We need a timestep of .001 (specified in the xml file) for the physics solver in order to get it to work.
        At each timestep, the robot has CONTROL_TIMESTEP seconds to reach a particular position.  
        We interface with the real robot by calling an action client which blocks until the position is reached. 
        I think the best way to make these to processes appear the same is to carefully choose CONTROL_TIMESTEP and only allow
        sequential actions to be relatively near to each other so as to ensure that they can be completed within 
        CONTROL_TIMESTEP seconds in mujoco.


        The two systems will have different types of errors if mujoco is misconfigured.  

        The pararms that effect performance for position controllers (as far as I can tell) are: 
        CONTROL_TIMESTEP sent to rl/control.py (time allowed to reach goal)
        kv gain in the xml file
        physics timestep in xml file
        and how far the desired action is from the current position


        Here are some performance stats with those variables tested: 
        +1 error count if an observed position angle > .1 of the commanded angle


        """
        direction = 1
        # relative actions are offsets from the current pose, so start from zeros
        action = np.zeros(13)
        while not done:
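            # same sweep as the absolute-step test: reverse direction before the
            # joint gets within two steps of amins[jt] or amaxes[jt]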
            if obs_angles[jt] - (2 * args.relative_step_size) <= amins[jt]:
                direction = 1
                print("direction", direction, action[jt], amins[jt],
                      amaxes[jt])
            if obs_angles[jt] + (2 * args.relative_step_size) >= amaxes[jt]:
                direction = -1
                print("direction", direction, action[jt], amins[jt],
                      amaxes[jt])

            # turn hand so it is easier to see for finger moves
            #if num_steps < 10:
            #    action[1] = .1
            #    action[3] = .01

            #if jt == 4:
            #    if num_steps < 10:
            #        action[3] = .1
            #    else:
            #        action[3] = 0.0

            #if jt > 6:
            #    if num_steps < 10:
            #        action[5] = -.1
            #    else:
            #        action[5] = 0.0
            action[jt] = args.relative_step_size * direction
            #base_action[:7] = action
            print('JT{}N{}A'.format(jt, num_steps), action)
            reward = 0
            # Perform action
            step_type, _, discount, next_state = eval_env.step(action)
            last_obs_angles = deepcopy(state['observations'][3:13 + 3])
            obs_angles = deepcopy(next_state['observations'][3:13 + 3])
            print('JT{}N{}O'.format(jt, num_steps), obs_angles)

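            # relative control: the target is the previous observed angle plus
            # the commanded offset, so compare (last_obs_angles + action) to the new angles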
            error = (last_obs_angles + action) - obs_angles
            abs_error = np.abs(error)
            if np.max(abs_error) > .1:
                print("----ERROR", error)
                error_dict[jt] += 1
                reward = -1

            next_frame_compressed = get_next_frame(eval_env)
            done = step_type.last()
            # Store data in replay buffer

            eval_replay_buffer.add(state['observations'],
                                   action,
                                   reward,
                                   next_state['observations'],
                                   done,
                                   frame_compressed=frame_compressed,
                                   next_frame_compressed=next_frame_compressed)

            frame_compressed = next_frame_compressed
            state = next_state
            num_steps += 1

        print("JT TOTAL ERRORS", error_dict)
        last_steps = eval_replay_buffer.get_last_steps(num_steps)
        done = True

        emovie_path = eval_base_path + '_JT%02d_%s_%s.mp4' % (
            jt, args.eval_filename_modifier, args.camera_view)
        ebase = emovie_path.replace('.mp4', '')

        plotting.plot_states(last_steps, ebase, detail_dict=state_names_dict)
        plotting.plot_position_actions(last_steps,
                                       ebase,
                                       relative=args.relative_step)
        plotting.plot_frames(emovie_path,
                             eval_replay_buffer.get_last_steps(num_steps),
                             plot_action_frames=True,
                             min_action=-kwargs['max_action'],
                             max_action=kwargs['max_action'],
                             plot_frames=True)
        pickle.dump(eval_replay_buffer, open(ebase + '.epkl', 'wb'))
        plotting.plot_replay_reward(eval_replay_buffer,
                                    ebase,
                                    start_step=0,
                                    name_modifier='train')

    # write data files
    print("---------------------------------------")
    eval_replay_buffer.shrink_to_last_step()
    pickle.dump(eval_replay_buffer, open(eval_step_file_path, 'wb'))
    print("JT TOTAL ERRORS", error_dict)

    movie_path = eval_base_path + '_%s_%s.mp4' % (args.eval_filename_modifier,
                                                  args.camera_view)
    plotting.plot_frames(movie_path,
                         eval_replay_buffer.get_last_steps(
                             eval_replay_buffer.size),
                         plot_action_frames=True,
                         min_action=-kwargs['max_action'],
                         max_action=kwargs['max_action'],
                         plot_frames=False)