Example #1
    def save_act(self, path=None):
        """Save model to a pickle located at `path`"""
        if path is None:
            path = os.path.join(logger.get_dir(), "model.pkl")

        with tempfile.TemporaryDirectory() as td:
            save_variables(os.path.join(td, "model"))
            arc_name = os.path.join(td, "packed.zip")
            with zipfile.ZipFile(arc_name, 'w') as zipf:
                for root, dirs, files in os.walk(td):
                    for fname in files:
                        file_path = os.path.join(root, fname)
                        if file_path != arc_name:
                            zipf.write(file_path, os.path.relpath(file_path, td))
            with open(arc_name, "rb") as f:
                model_data = f.read()
        with open(path, "wb") as f:
            cloudpickle.dump((model_data, self._act_params), f)
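
A matching load path would reverse the steps above: unpickle the (zip bytes, act_params) tuple, unpack the archive into a temporary directory, and restore the TensorFlow variables. The sketch below is only illustrative and assumes baselines-style helpers (cloudpickle, load_variables) are in scope; it is not necessarily the library's exact load_act.

import os
import tempfile
import zipfile

import cloudpickle


def load_act_sketch(path):
    """Unpack the (zip bytes, act_params) tuple written by save_act above."""
    with open(path, "rb") as f:
        model_data, act_params = cloudpickle.load(f)
    with tempfile.TemporaryDirectory() as td:
        arc_path = os.path.join(td, "packed.zip")
        with open(arc_path, "wb") as f:
            f.write(model_data)                       # restore the zipped checkpoint bytes
        zipfile.ZipFile(arc_path, 'r').extractall(td)
        load_variables(os.path.join(td, "model"))     # assumes baselines' load_variables is in scope
    return act_params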
Example #2
def train_policy(arglist):
    with U.single_threaded_session():
        # Create the environment
        if arglist.use_dense_rewards:
            print("Will use env MineRLNavigateDense-v0")
            env = gym.make("MineRLNavigateDense-v0")
            env_name = "MineRLNavigateDense-v0"
        else:
            print("Will use env MineRLNavigate-v0")
            env = gym.make('MineRLNavigate-v0')
            env_name = "MineRLNavigate-v0"

        if arglist.force_forward:
            env = MineCraftWrapperSimplified(env)
        else:
            env = MineCraftWrapper(env)

        if not arglist.use_demonstrations:
            # Use stack of last 4 frames as obs
            env = FrameStack(env, 4)

        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space,
                                                      name=name),
            q_func=build_q_func('conv_only', dueling=True),
            num_actions=env.action_space.n,
            gamma=0.9,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )

        # Create the replay buffer(s) (TODO: Use prioritized replay buffer)
        if arglist.use_demonstrations:
            replay_buffer = ReplayBuffer(int(arglist.replay_buffer_len / 2))
            demo_buffer = load_demo_buffer(env_name,
                                           int(arglist.replay_buffer_len / 2))
        else:
            replay_buffer = ReplayBuffer(arglist.replay_buffer_len)

        # Create the schedule for exploration, annealed from 1.0 (every action is random)
        # down to arglist.final_epsilon (most actions follow the values predicted by the model).
        exploration = LinearSchedule(
            schedule_timesteps=arglist.num_exploration_steps *
            arglist.num_episodes * arglist.max_episode_steps,
            initial_p=1.0,
            final_p=arglist.final_epsilon)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        episode_rewards = [0.0]
        n_episodes = 0
        n_steps = 0
        obs = env.reset()
        log_path = "./learning_curves/minerl_" + str(date.today()) + "_" + str(
            time.time()) + ".dat"
        log_file = open(log_path, "a")
        for episode in range(arglist.num_episodes):
            print("Episode: ", str(episode))
            done = False
            episode_steps = 0
            while not done:

                # Take action and update exploration to the newest value
                action = act(obs[None],
                             update_eps=exploration.value(n_steps))[0]
                new_obs, rew, done, _ = env.step(action)
                n_steps += 1
                episode_steps += 1

                # Break episode
                if episode_steps > arglist.max_episode_steps:
                    done = True

                # Store transition in the replay buffer.
                replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                # Store rewards
                episode_rewards[-1] += rew
                if done:
                    obs = env.reset()
                    episode_rewards.append(0)
                    n_episodes += 1

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if (n_steps > arglist.learning_starts_at_steps) and (n_steps %
                                                                     4 == 0):
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        32)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))

                if arglist.use_demonstrations:
                    if (n_steps < arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))
                    if (n_steps > arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))

                # Update target network periodically.
                if n_steps % arglist.target_net_update_freq == 0:
                    update_target()

                # Log data for analysis
                if done and len(episode_rewards) % 10 == 0:
                    logger.record_tabular("steps", n_steps)
                    logger.record_tabular("episodes", len(episode_rewards))
                    logger.record_tabular(
                        "mean episode reward",
                        round(np.mean(episode_rewards[-101:-1]), 1))
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * exploration.value(n_steps)))
                    logger.dump_tabular()

                # Save checkpoints and log training progress periodically
                if n_steps % arglist.checkpoint_rate == 0:
                    checkpoint_path = "./checkpoints/minerl_" + str(
                        episode) + "_" + str(date.today()) + "_" + str(
                            time.time()) + ".pkl"
                    save_variables(checkpoint_path)
                    print("%s,%s,%s,%s" %
                          (n_steps, episode,
                           round(np.mean(episode_rewards[-101:-1]),
                                 1), int(100 * exploration.value(n_steps))),
                          file=log_file)
        log_file.close()
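
train_policy reads a number of attributes from arglist; a minimal argparse sketch covering only the fields referenced above could look like the following (the flag names come from the code, the default values are illustrative guesses):

import argparse

def parse_args_sketch():
    # Defaults below are guesses; note that num_exploration_steps is multiplied
    # by num_episodes * max_episode_steps when building the exploration schedule.
    p = argparse.ArgumentParser()
    p.add_argument("--use_dense_rewards", action="store_true")
    p.add_argument("--force_forward", action="store_true")
    p.add_argument("--use_demonstrations", action="store_true")
    p.add_argument("--replay_buffer_len", type=int, default=100000)
    p.add_argument("--num_exploration_steps", type=float, default=0.1)
    p.add_argument("--num_episodes", type=int, default=100)
    p.add_argument("--max_episode_steps", type=int, default=1000)
    p.add_argument("--final_epsilon", type=float, default=0.02)
    p.add_argument("--learning_starts_at_steps", type=int, default=1000)
    p.add_argument("--target_net_update_freq", type=int, default=500)
    p.add_argument("--checkpoint_rate", type=int, default=10000)
    return p.parse_args()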
Example #3
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=100000,
          exploration_fraction=0.1,
          exploration_final_eps=0.1,
          train_freq=1,
          batch_size=64,
          print_freq=1,
          eval_freq=2500,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          csv_path="results.csv",
          method_type="baseline",
          **network_kwargs):
    """Train a deepr model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepr.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepr/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    #q_func = build_q_func(network, **network_kwargs)
    q_func = build_q_func(mlp(num_layers=4, num_hidden=64), **network_kwargs)
    #q_func = build_q_func(mlp(num_layers=2, num_hidden=64, activation=tf.nn.relu), **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the exploration schedule (note: initial_p is set to exploration_final_eps here, so epsilon stays constant).
    exploration = LinearSchedule(
        schedule_timesteps=int(exploration_fraction * total_timesteps),
        #initial_p=1.0,
        initial_p=exploration_final_eps,
        final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    eval_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        csvfile = open(csv_path, 'w', newline='')
        fieldnames = ['STEPS', 'REWARD']
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()

        for t in range(total_timesteps + 1):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                #update_eps = exploration.value(t)
                update_eps = exploration_final_eps
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            action_mask = get_mask(env, method_type)
            a = act(np.array(obs)[None],
                    unused_actions_neginf_mask=action_mask,
                    update_eps=update_eps,
                    **kwargs)[0]

            env_action = a
            reset = False
            new_obs, rew, done, _ = env.step(env_action)

            eval_rewards[-1] += rew

            action_mask_p = get_mask(env, method_type)
            # Shaping
            if method_type == 'shaping':

                ## look-ahead shaping
                ap = act(np.array(new_obs)[None],
                         unused_actions_neginf_mask=action_mask_p,
                         stochastic=False)[0]
                f = action_mask_p[ap] - action_mask[a]
                rew = rew + f

            # Store transition in the replay buffer.
            #replay_buffer.add(obs, a, rew, new_obs, float(done), action_mask_p)
            if method_type != 'shaping':
                replay_buffer.add(obs, a, rew, new_obs, float(done),
                                  np.zeros(env.action_space.n))
            else:
                replay_buffer.add(obs, a, rew, new_obs, float(done),
                                  action_mask_p)
            obs = new_obs

            if t % eval_freq == 0:
                eval_rewards.append(0.0)

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones, masks_tp1 = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights, masks_tp1)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_eval_reward = round(np.mean(eval_rewards[-1 - print_freq:-1]),
                                     1)
            num_evals = len(eval_rewards)
            if t > 0 and t % eval_freq == 0 and print_freq is not None and t % (
                    print_freq * eval_freq) == 0:
                #if done and print_freq is not None and len(eval_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("evals", num_evals)
                logger.record_tabular("average reward in this eval",
                                      mean_eval_reward / (eval_freq))
                logger.record_tabular("total reward in this eval",
                                      mean_eval_reward)
                logger.dump_tabular()

                writer.writerow({
                    "STEPS": t,
                    "REWARD": mean_eval_reward / (eval_freq)
                })
                csvfile.flush()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_evals > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_eval_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_eval_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_eval_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
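
A hedged usage sketch for this masking/shaping variant: the call mirrors a standard baselines learn invocation, with method_type selecting the masking behaviour. The environment name and hyperparameter values below are illustrative assumptions, not taken from the original code, and get_mask() must support whatever environment is passed in.

import gym

env = gym.make('CartPole-v0')                 # placeholder environment
act = learn(env,
            network='mlp',                    # note: the body above hard-codes an mlp q_func anyway
            total_timesteps=50000,
            eval_freq=2500,
            csv_path='results_shaping.csv',
            method_type='shaping')            # or 'baseline'
act.save('shaping_model.pkl')                 # ActWrapper exposes save() in baselines-style code
env.close()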
Example #4
    def save(self, save_path):
        tf_util.save_variables(save_path)
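
As a usage sketch, the symmetric restore typically goes through tf_util.load_variables; the object name and path below are placeholders, not part of the original code.

model.save("/tmp/my_model")               # hypothetical instance exposing the save() method above
tf_util.load_variables("/tmp/my_model")   # restores the saved variables into the active session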
Example #5
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=5,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the trained model from (default: None); used in the test stage.
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.

    """

    # Create all the functions necessary to train the model
    sess = get_session()
    set_global_seeds(seed)
    med_libs = MedLibs()
    '''Define Q network 
    inputs: observation place holder(make_obs_ph), num_actions, scope, reuse
    outputs(tensor of shape batch_size*num_actions): values of each action, Q(s,a_{i})
    '''
    q_func = build_q_func(network, **network_kwargs)
    '''  To put observations into a placeholder  '''
    # TODO: Can only deal with Discrete and Box observation spaces for now
    # observation_space = env.observation_space (default)
    # Use sub_obs_space instead

    observation_space = med_libs.subobs_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    '''  Customize action  '''
    # TODO: subset of action space.
    action_dim = med_libs.sub_act_dim
    ''' 
    Returns: deepq.build_train()
        act: (tf.Variable, bool, float) -> tf.Variable
            function to select an action given an observation.
            act is computed by [build_act] or [build_act_with_param_noise]
        train: (object, np.array, np.array, object, np.array, np.array) -> np.array
            optimize the error in Bellman's equation.
        update_target: () -> ()
            copy the parameters from optimized Q function to the target Q function. 
        debug: {str: function}
            a bunch of functions to print debug data like q_values.
    '''

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=action_dim,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        double_q=True,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': action_dim,
    }
    '''Construct an act object using ActWrapper'''
    act = ActWrapper(act, act_params)
    ''' Create the replay buffer'''
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    '''Create the schedule for exploration starting from 1.'''
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
    '''
    Initialize all the uninitialized variables in the global scope and copy them to the target network.
    '''
    U.initialize()
    update_target()
    episode_rewards = [0.0]
    saved_mean_reward = None

    obs = env.reset()
    sub_obs = med_libs.custom_obs(obs)  # TODO: customize observations
    pre_obs = obs
    reset = True
    mydict = med_libs.action_dict
    already_starts = False

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True

        elif load_path is not None:
            # load_path: a trained model/policy
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))
        ''' Training loop starts'''
        t = 0
        while t < total_timesteps:
            if callback is not None:
                if callback(locals(), globals()):
                    break
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            ''' Choose action: take action and update exploration to the newest value
            '''
            # TODO: Mixed action strategy
            # Normal status, action is easily determined by rules, use [obs]
            action = med_libs.simple_case_action(obs)
            # Distraction status, action is determined by Q, with [sub_obs]
            if action == -10:
                action = act(np.array(sub_obs)[None],
                             update_eps=update_eps,
                             **kwargs)[0]
                action = med_libs.action_Q_env(
                    action
                )  # TODO:action_Q_env, from Q_action(0~2) to env_action(2~4)

            reset = False
            ''' Step action '''
            new_obs, rew, done, d_info = env.step(action)
            d_att_last = int(pre_obs[0][0])
            d_att_now = int(obs[0][0])
            d_att_next = int(new_obs[0][0])
            ''' Store transition in the replay buffer.'''
            pre_obs = obs
            obs = new_obs
            sub_new_obs = med_libs.custom_obs(new_obs)

            if (d_att_last == 0 and d_att_now == 1) and not already_starts:
                already_starts = True

            if already_starts and d_att_now == 1:
                replay_buffer.add(sub_obs, action, rew, sub_new_obs,
                                  float(done))
                episode_rewards[-1] += rew  # Sum of rewards
                t = t + 1
                print(
                    '>> Iteration:{}, State[d_att,cd_activate,L4_available,ssl4_activate,f_dc]:{}'
                    .format(t, sub_obs))
                print(
                    'Dis_Last:{}, Dis_Now:{}, Dis_Next:{},Reward+Cost:{}, Action:{}'
                    .format(
                        d_att_last, d_att_now, d_att_next, rew,
                        list(mydict.keys())[list(
                            mydict.values()).index(action)]))

            # update sub_obs
            sub_obs = sub_new_obs

            # Done and Reset
            if done:
                print('Done infos: ', d_info)
                print('======= end =======')
                obs = env.reset()
                sub_obs = med_libs.custom_obs(obs)  # TODO: custom obs
                pre_obs = obs  # TODO: save obs at t-1
                already_starts = False
                episode_rewards.append(0.0)
                reset = True

            # Update the Q network parameters
            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                # Calculate td-errors
                actions = med_libs.action_env_Q(
                    actions
                )  # TODO:action_env_Q, from env_action(2~4) to Q_action(0~2)
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically, copy weights of Q to target Q
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
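
The mixed action strategy above (rule-based action first, Q-network only when the rule returns the sentinel -10) can be isolated into a small standalone helper; simple_case_action, action_Q_env and the sentinel value come from the code, while the wrapper itself is illustrative.

import numpy as np

def choose_action_sketch(obs, sub_obs, act, med_libs, update_eps):
    """Return a rule-based action when one applies, otherwise fall back to the Q-network."""
    action = med_libs.simple_case_action(obs)       # returns -10 when no rule applies
    if action == -10:
        q_action = act(np.array(sub_obs)[None], update_eps=update_eps)[0]
        action = med_libs.action_Q_env(q_action)    # map Q-action index (0~2) to env action (2~4)
    return action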
Example #6
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          lambda_=0.1,
          margin=0.1,
          i_before=1,
          gamma=1.0,
          target_network_update_freq=500,
          double_q=False,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
          ):
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = modified_deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        lambda_=lambda_,
        margin=margin,
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        double_q=double_q
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = ModifiedPrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ModifiedReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    episode_scores = deque(maxlen=100)
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    trained = False
    num_steps_per_episode = 0
    obses_before = deque(maxlen=i_before)

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(
                    1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n)
                )
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, info = env.step(env_action)

            # Store transition in the replay buffer.
            if len(obses_before) < obses_before.maxlen:
                obses_before.append(obs)
                obs_tmi = np.zeros_like(obs)
                replay_buffer.new_add(obs_tmi, obs, action, rew, new_obs, float(done), float(False))
            else:
                obs_tmi = obses_before.popleft()
                obses_before.append(obs)
                replay_buffer.new_add(obs_tmi, obs, action, rew, new_obs, float(done), float(True))
            obs = new_obs

            episode_rewards[-1] += rew
            num_steps_per_episode += 1
            if done:
                if 'episode' in info:
                    episode_scores.append(info['episode']['r'])
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True
                obses_before.clear()

            if t > learning_starts and t % train_freq == 0:
                trained = True

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    obses_tmi, obses_t, actions, rewards, obses_tp1, dones, has_obs_tmis, weights, batch_idxes = experience
                else:
                    obses_tmi, obses_t, actions, rewards, obses_tp1, dones, has_obs_tmis = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors, min_delta_d, max_delta_d, delta_d, representation_loss, weighted_error = train(
                    obses_tmi, obses_t, actions, rewards, obses_tp1, dones, has_obs_tmis, weights
                )
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if len(episode_scores) != 0:
                mean_100ep_scores = sum(episode_scores) / len(episode_scores)
            else:
                mean_100ep_scores = 0.0  # avoid a NameError if logging occurs before any scored episode
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("mean 100 episode scores", mean_100ep_scores)
                logger.record_tabular("number of steps per episode", num_steps_per_episode)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))

                if t > learning_starts and trained:
                    # Log extra loss information
                    logger.record_tabular("delta d", delta_d)
                    logger.record_tabular("min delta d", min_delta_d)
                    logger.record_tabular("max delta d", max_delta_d)
                    logger.record_tabular("representation loss", representation_loss)
                    logger.record_tabular("weighted error", weighted_error)

                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                            saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
            if done:
                num_steps_per_episode = 0
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
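
The obses_before deque above delays observations by i_before steps so that each stored transition also carries the observation from i_before steps earlier, plus a flag saying whether that history exists yet. A tiny standalone illustration of the buffering pattern, with integers standing in for observations:

from collections import deque

i_before = 2
obses_before = deque(maxlen=i_before)
for t in range(5):                          # t doubles as the "observation" here
    obs = t
    if len(obses_before) < obses_before.maxlen:
        obses_before.append(obs)
        obs_tmi, has_obs_tmi = 0, False     # not enough history yet (zeros_like in the real code)
    else:
        obs_tmi = obses_before.popleft()    # observation from i_before steps ago
        obses_before.append(obs)
        has_obs_tmi = True
    print(t, obs, obs_tmi, has_obs_tmi)     # e.g. t=2 pairs obs=2 with obs_tmi=0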
Example #7
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
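
This example is essentially the stock baselines deepq training loop. A typical invocation (the environment choice and hyperparameter values are illustrative assumptions) would be:

import gym

env = gym.make('CartPole-v0')       # placeholder environment
act = learn(env,
            network='mlp',
            lr=1e-3,
            total_timesteps=100000,
            buffer_size=50000,
            exploration_fraction=0.1,
            exploration_final_eps=0.02,
            print_freq=10)
act.save('cartpole_model.pkl')      # ActWrapper.save persists the trained policy
env.close()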
Example #8
def learn_neural_linear(
        env,
        network,
        seed=None,
        lr=5e-4,
        total_timesteps=100000,
        buffer_size=50000,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        train_freq=1,
        batch_size=32,
        print_freq=10,  #100
        checkpoint_freq=10000,
        checkpoint_path=None,
        learning_starts=999,
        gamma=1.0,
        target_network_update_freq=500,
        prioritized_replay=False,
        prioritized_replay_alpha=0.6,
        prioritized_replay_beta0=0.4,
        prioritized_replay_beta_iters=None,
        prioritized_replay_eps=1e-6,
        param_noise=False,
        callback=None,
        load_path=None,
        ddqn=False,
        prior="no prior",
        actor="dqn",
        **network_kwargs):
    # Train a deepq model with a neural-linear (Bayesian linear regression) head.

    # Create all the functions necessary to train the model
    checkpoint_path = logger.get_dir()
    sess = get_session()
    set_global_seeds(seed)

    blr_params = BLRParams()
    q_func = deepq.models.cnn_to_mlp(
        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
        hiddens=[blr_params.feat_dim],
        dueling=bool(0),
    )
    # q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, feat_dim, feat, feat_target, target, last_layer_weights, blr_ops, blr_helpers = deepq.build_train_neural_linear(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        double_q=ddqn,
        actor=actor)
    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        # BLR
        # preliminearies
        num_actions = env.action_space.n
        w_mu = np.zeros((num_actions, feat_dim))
        w_sample = np.random.normal(loc=0,
                                    scale=0.1,
                                    size=(num_actions, feat_dim))
        w_target = np.random.normal(loc=0,
                                    scale=0.1,
                                    size=(num_actions, feat_dim))
        w_cov = np.zeros((num_actions, feat_dim, feat_dim))
        for a in range(num_actions):
            w_cov[a] = np.eye(feat_dim)

        phiphiT = np.zeros((num_actions, feat_dim, feat_dim))
        phiY = np.zeros((num_actions, feat_dim))

        a0 = 6
        b0 = 6
        a_sig = [a0 for _ in range(num_actions)]
        b_sig = [b0 for _ in range(num_actions)]

        yy = [0 for _ in range(num_actions)]

        blr_update = 0

        for t in tqdm(range(total_timesteps)):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # if t % 1000 == 0:
            #     print("{}/{}".format(t,total_timesteps))
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            action = act(np.array(obs)[None], w_sample[None])
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)

            # clipping like in BDQN
            rew = np.sign(rew)

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            # sample new w from posterior
            if t > 0 and t % blr_params.sample_w == 0:
                for i in range(num_actions):
                    if blr_params.no_prior:
                        w_sample[i] = np.random.multivariate_normal(
                            w_mu[i], w_cov[i])
                    else:
                        sigma2_s = b_sig[i] * invgamma.rvs(a_sig[i])
                        w_sample[i] = np.random.multivariate_normal(
                            w_mu[i], sigma2_s * w_cov[i])

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                # when the target network updates we also update our posterior beliefs,
                # transferring information from the old target to the new one
                blr_update += 1
                if blr_update == 10:  #10
                    print("updating posterior parameters")
                    if blr_params.no_prior:
                        phiphiT, phiY, w_mu, w_cov, a_sig, b_sig = BayesRegNoPrior(
                            phiphiT, phiY, w_target, replay_buffer, feat,
                            feat_target, target, num_actions,
                            blr_params, w_mu, w_cov,
                            sess.run(last_layer_weights), prior, blr_ops,
                            blr_helpers)
                    else:
                        phiphiT, phiY, w_mu, w_cov, a_sig, b_sig = BayesRegWithPrior(
                            phiphiT, phiY, w_target, replay_buffer, feat,
                            feat_target, target, num_actions, blr_params, w_mu,
                            w_cov, sess.run(last_layer_weights))
                    blr_update = 0

                print("updateing target, steps {}".format(t))
                update_target()
                w_target = w_mu

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_10ep_reward = round(np.mean(episode_rewards[-11:-1]), 1)
            num_episodes = len(episode_rewards)
            # if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
            if t % 10000 == 0:  #1000
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("mean 10 episode reward",
                                      mean_10ep_reward)
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
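The exact posterior update performed by BayesRegNoPrior / BayesRegWithPrior lives outside this snippet; as a rough sketch under standard assumptions, the neural-linear step above amounts to a closed-form Bayesian linear regression over the last-layer features, computed per action. The helper below is illustrative only (prior_precision and noise_var are assumed hyperparameters, not the actual BLRParams fields):

import numpy as np

def blr_posterior(phi, y, prior_precision=1.0, noise_var=1.0):
    """Closed-form Gaussian posterior over last-layer weights for one action.

    phi: (N, feat_dim) features of visited states, y: (N,) regression targets.
    """
    feat_dim = phi.shape[1]
    phiphiT = phi.T @ phi                                   # sufficient statistics, as accumulated above
    phiY = phi.T @ y
    precision = phiphiT / noise_var + prior_precision * np.eye(feat_dim)
    w_cov = np.linalg.inv(precision)                        # posterior covariance
    w_mu = w_cov @ (phiY / noise_var)                       # posterior mean
    return w_mu, w_cov

# Thompson sampling then draws w_sample ~ N(w_mu, w_cov) for each action,
# which matches the "sample new w from posterior" block in the training loop above.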
Example #9
 def save(self, save_path):
     tf_util.save_variables(save_path)
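This thin wrapper simply delegates to baselines' tf_util helpers. A hedged round-trip sketch of how save_variables and load_variables are typically paired (the toy variable, path, and session setup are illustrative only):

import tensorflow as tf
from baselines.common import tf_util

v = tf.Variable(tf.zeros([3]), name="toy_var")
sess = tf_util.make_session(num_cpu=1)
with sess.as_default():
    tf_util.initialize()
    tf_util.save_variables("/tmp/toy_model")    # dumps {variable name: value} for the current session
    tf_util.load_variables("/tmp/toy_model")    # assigns the saved values back to matching variables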
Example #10
def learn(env,
          network,
          seed=None,
          lr=1e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          multiplayer=False,
          callback=None,
          load_path=None,
          load_path_1=None,
          load_path_2=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """

    # This is not handled in the most elegant way:
    # variables have a _1 or _2 suffix to keep the two players separate,
    # and a number of `if multiplayer` statements keep the _2 variables inactive in single-player mode

    # in multiplayer Space Invaders, a player must not be rewarded when the other player dies
    isSpaceInvaders = False
    if "SpaceInvaders" in str(env):
        isSpaceInvaders = True

    # put a limit on the amount of memory used, otherwise TensorFlow will consume nearly everything
    # this leaves 1 GB free on my computer, others may need to change it

    # Create all the functions necessary to train the model
    # Create two separate TensorFlow sessions
    graph_1 = tf.Graph()
    sess_1 = tf.Session(graph=graph_1)
    if multiplayer:
        graph_2 = tf.Graph()
        sess_2 = tf.Session(graph=graph_2)
    else:
        # set session 2 to None if it's not being used
        sess_2 = None
    set_global_seeds(seed)
    # specify the q functions as separate objects
    q_func_1 = build_q_func(network, **network_kwargs)
    if multiplayer:
        q_func_2 = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    # build everything for the first model
    # pass in the session and the "_1" suffix
    act_1, train_1, update_target_1, debug_1 = deepq.build_train(
        sess=sess_1,
        make_obs_ph=make_obs_ph,
        q_func=q_func_1,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        scope="deepq")
    # a lot of if multiplayer statements duplicating these actions for a second network
    # pass in session 2 and "_2" instead
    if multiplayer:
        act_2, train_2, update_target_2, debug_2 = deepq.build_train(
            sess=sess_2,
            make_obs_ph=make_obs_ph,
            q_func=q_func_2,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            gamma=gamma,
            grad_norm_clipping=10,
            param_noise=param_noise,
            scope="deepq")

    # separate act_params for each wrapper
    act_params_1 = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func_1,
        'num_actions': env.action_space.n,
    }
    if multiplayer:
        act_params_2 = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func_2,
            'num_actions': env.action_space.n,
        }
    # make the act wrappers
    act_1 = ActWrapper(act_1, act_params_1)
    if multiplayer:
        act_2 = ActWrapper(act_2, act_params_2)
    # I need to return something if it's single-player
    else:
        act_2 = None

    # Create the replay buffer
    # separate replay buffers are required for each network
    # this is required for competitive because the replay buffers hold rewards
    # and player 2 has different rewards than player 1
    if prioritized_replay:
        replay_buffer_1 = PrioritizedReplayBuffer(
            buffer_size, alpha=prioritized_replay_alpha)
        if multiplayer:
            replay_buffer_2 = PrioritizedReplayBuffer(
                buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer_1 = ReplayBuffer(buffer_size)
        if multiplayer:
            replay_buffer_2 = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    # initialize both sessions
    U.initialize(sess_1)
    if multiplayer:
        U.initialize(sess_2)
    # the session was passed into these functions when they were created
    # the separate update functions work within the different sessions
    update_target_1()
    if multiplayer:
        update_target_2()

    # keep track of rewards for both models separately
    episode_rewards_1 = [0.0]
    saved_mean_reward_1 = None
    if multiplayer:
        episode_rewards_2 = [0.0]
        saved_mean_reward_2 = None
    obs = env.reset()
    reset = True

    # storing stuff in a temporary directory while it's working
    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file_1 = os.path.join(td, "model_1")
        temp_file_1 = os.path.join(td, "temp_1")
        model_saved_1 = False
        if multiplayer:
            model_file_2 = os.path.join(td, "model_2")
            temp_file_2 = os.path.join(td, "temp_2")
            model_saved_2 = False

        if tf.train.latest_checkpoint(td) is not None:
            if multiplayer:
                # load both models if multiplayer is on
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(model_file_1))
                model_saved_1 = True
                load_variables(model_file_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(model_file_2))
                model_saved_2 = True
            # otherwise just load the first one
            else:
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model from {}'.format(model_file_1))
                model_saved_1 = True
        # I have separate load variables for single-player and multiplayer
        # this should be None if multiplayer is on
        elif load_path is not None:
            load_variables(load_path, sess_1)
            logger.log('Loaded model from {}'.format(load_path))
        # load the separate models in for multiplayer
        # should load the variables into the appropriate sessions

        # this format assumes a Player 1 model is loaded into session 1 and a Player 2 model into session 2;
        # in practice the models do not work properly otherwise
        elif multiplayer:
            if load_path_1 is not None:
                load_variables(load_path_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(load_path_1))
            if load_path_2 is not None:
                load_variables(load_path_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(load_path_2))

        # actual training starts here
        for t in range(total_timesteps):
            # use this for updating purposes
            actual_t = t + 1
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            # receive model 1's action based on the model and observation
            action_1 = act_1(np.array(obs)[None],
                             update_eps=update_eps,
                             **kwargs)[0]
            env_action_1 = action_1
            # do the same for model 2 if in multiplayer
            if multiplayer:
                action_2 = act_2(np.array(obs)[None],
                                 update_eps=update_eps,
                                 **kwargs)[0]
                env_action_2 = action_2
            reset = False
            # apply actions to the environment
            if multiplayer:
                new_obs, rew_1, rew_2, done, _ = env.step(
                    env_action_1, env_action_2)
            # apply single action if there isn't a second model
            else:
                new_obs, rew_1, rew_2, done, _ = env.step(env_action_1)

            # manual clipping for Space Invaders multiplayer
            if isSpaceInvaders and multiplayer:
                # don't reward a player when the other player dies
                # change the reward to 0
                # the only time either player will get rewarded 200 is when the other player dies
                if rew_1 >= 200:
                    rew_1 = rew_1 - 200.0
                if rew_2 >= 200:
                    rew_2 = rew_2 - 200.0
                # manually clip the rewards using the sign function
                rew_1 = np.sign(rew_1)
                rew_2 = np.sign(rew_2)
                combo_factor = 0.25
                rew_1_combo = rew_1 + combo_factor * rew_2
                rew_2_combo = rew_2 + combo_factor * rew_1
                rew_1 = rew_1_combo
                rew_2 = rew_2_combo

            # Store transition in the replay buffers
            replay_buffer_1.add(obs, action_1, rew_1, new_obs, float(done))
            if multiplayer:
                # pass reward_2 to the second player
                # this reward will vary based on the game
                replay_buffer_2.add(obs, action_2, rew_2, new_obs, float(done))
            obs = new_obs
            # separate rewards for each model
            episode_rewards_1[-1] += rew_1
            if multiplayer:
                episode_rewards_2[-1] += rew_2
            if done:
                obs = env.reset()
                episode_rewards_1.append(0.0)
                if multiplayer:
                    episode_rewards_2.append(0.0)
                reset = True
            if actual_t > learning_starts and actual_t % train_freq == 0:

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                # sample from the two replay buffers
                if prioritized_replay:
                    experience_1 = replay_buffer_1.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1,
                     weights_1, batch_idxes_1) = experience_1
                    # keep all the variables with separate names
                    if multiplayer:
                        experience_2 = replay_buffer_2.sample(
                            batch_size, beta=beta_schedule.value(t))
                        (obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2,
                         weights_2, batch_idxes_2) = experience_2
                # do the same if there's no prioritization
                else:
                    obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1 = replay_buffer_1.sample(
                        batch_size)
                    weights_1, batch_idxes_1 = np.ones_like(rewards_1), None
                    if multiplayer:
                        obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2 = replay_buffer_2.sample(
                            batch_size)
                        weights_2, batch_idxes_2 = np.ones_like(
                            rewards_2), None
                # actually train the model based on the samples
                td_errors_1 = train_1(obses_t_1, actions_1, rewards_1,
                                      obses_tp1_1, dones_1, weights_1)
                if multiplayer:
                    td_errors_2 = train_2(obses_t_2, actions_2, rewards_2,
                                          obses_tp1_2, dones_2, weights_2)
                # give new priority weights to the observations
                if prioritized_replay:
                    new_priorities_1 = np.abs(
                        td_errors_1) + prioritized_replay_eps
                    replay_buffer_1.update_priorities(batch_idxes_1,
                                                      new_priorities_1)
                    if multiplayer:
                        new_priorities_2 = np.abs(
                            td_errors_2) + prioritized_replay_eps
                        replay_buffer_2.update_priorities(
                            batch_idxes_2, new_priorities_2)

            if actual_t > learning_starts and actual_t % target_network_update_freq == 0:
                # Update target networks periodically.
                update_target_1()
                if multiplayer:
                    update_target_2()

            # this section is for the purposes of logging stuff
            # calculate the average reward over the last 100 episodes
            mean_100ep_reward_1 = round(np.mean(episode_rewards_1[-101:-1]), 1)
            if multiplayer:
                mean_100ep_reward_2 = round(
                    np.mean(episode_rewards_2[-101:-1]), 1)
            num_episodes = len(episode_rewards_1)
            # every given number of episodes log and print out the appropriate stuff
            if done and print_freq is not None and len(
                    episode_rewards_1) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                # print out both rewards if multiplayer
                if multiplayer:
                    logger.record_tabular("mean 100 episode reward 1",
                                          mean_100ep_reward_1)
                    logger.record_tabular("mean 100 episode reward 2",
                                          mean_100ep_reward_2)
                else:
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward_1)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            # save best-performing version of each model
            # I've opted out of this for competitive multiplayer because it's difficult to determine what's "best"

            if (checkpoint_freq is not None and actual_t > learning_starts
                    and num_episodes > 100
                    and actual_t % checkpoint_freq == 0):
                # if there's a best reward, save it as the new best model
                if saved_mean_reward_1 is None or mean_100ep_reward_1 > saved_mean_reward_1:
                    if print_freq is not None:
                        if multiplayer:
                            logger.log(
                                "Saving model 1 due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                        else:
                            logger.log(
                                "Saving model due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                    save_variables(model_file_1, sess_1)
                    model_saved_1 = True
                    saved_mean_reward_1 = mean_100ep_reward_1

                if multiplayer and (saved_mean_reward_2 is None or
                                    mean_100ep_reward_2 > saved_mean_reward_2):
                    if print_freq is not None:
                        logger.log(
                            "Saving model 2 due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward_2, mean_100ep_reward_2))
                    save_variables(model_file_2, sess_2)
                    model_saved_2 = True
                    saved_mean_reward_2 = mean_100ep_reward_2

        # restore models at the end to the best performers
        if model_saved_1:
            if print_freq is not None:
                logger.log("Restored model 1 with mean reward: {}".format(
                    saved_mean_reward_1))
            load_variables(model_file_1, sess_1)
        if multiplayer and model_saved_2:
            if print_freq is not None:
                logger.log("Restored model 2 with mean reward: {}".format(
                    saved_mean_reward_2))
            load_variables(model_file_2, sess_2)
    return act_1, act_2, sess_1, sess_2
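As a usage note, the four handles returned above can drive a greedy evaluation episode. A minimal sketch, assuming the same two-reward env.step interface used during training and that each ActWrapper calls into its own session (play_episode is an illustrative helper, not part of the codebase):

import numpy as np

def play_episode(env, act_1, act_2=None):
    obs, done = env.reset(), False
    total_1, total_2 = 0.0, 0.0
    while not done:
        a1 = act_1(np.array(obs)[None], update_eps=0)[0]    # eps=0 -> greedy action for player 1
        if act_2 is not None:
            a2 = act_2(np.array(obs)[None], update_eps=0)[0]
            obs, rew_1, rew_2, done, _ = env.step(a1, a2)
        else:
            obs, rew_1, rew_2, done, _ = env.step(a1)
        total_1 += rew_1
        total_2 += rew_2
    return total_1, total_2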
Example #11
def train(train_params, policy_params, env_id):
	# Refresh training progress log	
	logger._configure_default_logger()

	from baselines.ppo1 import mlp_policy, pposgd_simple
	U.make_session(num_cpu=1).__enter__()
	def policy_fn(name, ob_space, ac_space):
		return mlp_policy.MlpPolicy(
			name=name, ob_space=ob_space, ac_space=ac_space, # set tensor
			hid_size=policy_params.nodes_per_layer, # set nodes
			num_hid_layers=policy_params.num_layers) # set layers


	# Set up environment
	env = gym.make(env_id)
	env = RewScale(env, 0.1)
	# Seed Set
	rank = MPI.COMM_WORLD.Get_rank()
	workerseed = train_params.seed + 1000000 * rank
	env.seed(workerseed)
	print('----------=================--------------')
	print('rank: ', rank, 'workerseed: ', workerseed)
	print('----------=================--------------')

	# Run Training with stochastic gradient descent
	pi = pposgd_simple.learn(env, policy_fn,
                max_timesteps=train_params.num_timesteps,
                timesteps_per_actorbatch=train_params.timesteps_per_actorbatch,
                clip_param=train_params.clip_param, 
		entcoeff=train_params.entcoeff,
                optim_epochs=train_params.optim_epochs,
                optim_stepsize=train_params.optim_stepsize,
                optim_batchsize=train_params.optim_batchsize,
                gamma=train_params.gamma,
                lam=train_params.lam,
                schedule=train_params.schedule,
	)
	env.close()

	# Save trained model and metadata
	print(train_params.model_path)
	if train_params.model_path:
		# Make Dir
		model_log_dir=os.environ['GYMFC_EXP_MODELSDIR']+train_params.model_name
		os.makedirs(train_params.model_dir, exist_ok=True)

		# Merge Metadata
		meta_data = {**vars(train_params), **vars(policy_params)}

		# Save Metadata as csv
		md_file = train_params.model_dir+'/'+'metadata.csv'
		md_keys = meta_data.keys()
		try:
			with open(md_file, 'w') as mdfile:
				writer = csv.DictWriter(mdfile, fieldnames = md_keys)
				writer.writeheader()
				writer.writerow(meta_data)
		except IOError:
			print("I/O error")

		# Save Model
		U.save_variables(train_params.model_path)

		# Save Training Progress file
		log_path = os.environ['OPENAI_LOGDIR']+'progress.csv'  # train prog log 
		copyfile(log_path, train_params.model_dir+'/'+'log.csv') # copy log csv
		
	else:
		print('model not named')
	return pi
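RewScale is defined elsewhere in this codebase; assuming it is a plain gym.RewardWrapper that multiplies every reward by the given factor (as the 0.1 argument above suggests), a minimal sketch would be:

import gym

class RewScale(gym.RewardWrapper):
    """Scale every reward by a constant factor to keep PPO value targets small."""

    def __init__(self, env, scale):
        super().__init__(env)
        self.scale = scale

    def reward(self, reward):
        return reward * self.scale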
Example #12
 def save(self, path, sess):
     save_variables(path, sess)
Example #13
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=1000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          save_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """

    # use a distinct name so the baselines `logger` module used below is not shadowed
    py_logger = logging.getLogger()
    coloredlogs.install(
        level='DEBUG',
        fmt='%(asctime)s,%(msecs)03d %(filename)s[%(process)d] %(levelname)s %(message)s')
    py_logger.setLevel(logging.DEBUG)

    # DATAVAULT: Set up list of action meanings and two lists to store episode
    # and total sums for each possible action in the list.
    action_names = env.unwrapped.get_action_meanings()
    action_episode_sums = []
    action_total_sums = []
    for x in range(len(action_names)):
        action_episode_sums.append(0)
        action_total_sums.append(0)

    # we also need a DataVault instance to record the per-step data
    dv = DataVault()

    # Create all the functions necessary to train the model
    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        #DATAVAULT: This is where you usually want to scrape data - in the timestep loop
        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            # if environment is pacman, limit moves to four directions
            name = env.unwrapped.spec.id
            if name == "MsPacmanNoFrameskip-v4":
                while True:
                    step_return = act(np.array(obs)[None],
                                      update_eps=update_eps,
                                      **kwargs)
                    action = step_return[0][0]

                    env_action = action
                    q_values = np.squeeze(step_return[1])
                    # test for break condition
                    if 1 <= action <= 4:
                        break
            else:
                step_return = act(np.array(obs)[None],
                                  update_eps=update_eps,
                                  **kwargs)
                action = step_return[0][0]
                q_values = np.squeeze(step_return[1])
                env_action = action
            reset = False

            new_obs, rew, done, info = env.step(env_action)
            # DATAVAULT: after each step, we push the information out to the datavault
            lives = env.ale.lives()
            #store_data(self, action, action_name, action_episode_sums, action_total_sums, reward, done, info, lives, q_values, observation, mean_reward):
            action_episode_sums, action_total_sums = dv.store_data(
                action, action_names[action], action_episode_sums,
                action_total_sums, rew, done, info, lives, q_values, new_obs,
                saved_mean_reward)

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()
            if (len(episode_rewards[-101:-1]) > 0):
                mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            else:
                mean_100ep_reward = 0
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    dv.make_dataframes()
    print("Save path is: ")
    print(save_path)
    # use parent dir to save data, so we can keep the current folder small and portable
    directory = os.path.abspath(os.path.join(save_path, os.pardir))
    csv_path = os.path.join(directory, 'CSVs')
    os.makedirs(csv_path, exist_ok=True)
    dv.df_to_csv(csv_path)
    return act
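DataVault itself is not shown in this example; a rough sketch of the interface the loop above relies on, assuming it simply buffers one record per environment step and flushes everything with pandas at the end (MinimalDataVault and its columns are illustrative, the real class records considerably more):

import os
import pandas as pd

class MinimalDataVault:
    """Buffers one record per environment step and writes them out as a CSV."""

    def __init__(self):
        self.rows = []
        self.df = None

    def store_data(self, action, action_name, action_episode_sums, action_total_sums,
                   reward, done, info, lives, q_values, observation, mean_reward):
        action_episode_sums[action] += 1
        action_total_sums[action] += 1
        self.rows.append({'action': action, 'action_name': action_name, 'reward': reward,
                          'done': done, 'lives': lives, 'mean_100ep_reward': mean_reward})
        if done:
            # reset the per-episode action counts at episode boundaries
            action_episode_sums = [0] * len(action_episode_sums)
        return action_episode_sums, action_total_sums

    def make_dataframes(self):
        self.df = pd.DataFrame(self.rows)

    def df_to_csv(self, csv_path):
        self.df.to_csv(os.path.join(csv_path, 'steps.csv'), index=False)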
Example #14
 def save(self, save_path):
     U.save_variables(save_path)
Example #15
 def save(self, path):
     save_variables(path)
Example #16
def do_agent_exploration(updates_queue: multiprocessing.Queue,
                         q_func_vars_trained_queue: multiprocessing.Queue,
                         network, seed, config, lr, total_timesteps,
                         learning_starts, buffer_size, exploration_fraction,
                         exploration_initial_eps, exploration_final_eps,
                         train_freq, batch_size, print_freq, checkpoint_freq,
                         gamma, target_network_update_freq, prioritized_replay,
                         prioritized_replay_alpha, prioritized_replay_beta0,
                         prioritized_replay_beta_iters, prioritized_replay_eps,
                         experiment_name, load_path, network_kwargs):
    env = DotaEnvironment()

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, _, _, debug = deepq.build_train(
        scope='deepq_act',
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=exploration_initial_eps,
                                 final_p=exploration_final_eps)

    U.initialize()

    reward_shaper = ActionAdviceRewardShaper(config=config)
    reward_shaper.load()
    reward_shaper.generate_merged_demo()

    full_exp_name = '{}-{}'.format(date.today().strftime('%Y%m%d'),
                                   experiment_name)
    experiment_dir = os.path.join('experiments', full_exp_name)
    os.makedirs(experiment_dir, exist_ok=True)

    summary_dir = os.path.join(experiment_dir, 'summaries')
    os.makedirs(summary_dir, exist_ok=True)
    summary_writer = tf.summary.FileWriter(summary_dir)
    checkpoint_dir = os.path.join(experiment_dir, 'checkpoints')
    os.makedirs(checkpoint_dir, exist_ok=True)
    stats_dir = os.path.join(experiment_dir, 'stats')
    os.makedirs(stats_dir, exist_ok=True)

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_dir or td

        os.makedirs(td, exist_ok=True)
        model_file = os.path.join(td, "best_model")
        model_saved = False
        saved_mean_reward = None

        # if os.path.exists(model_file):
        #     print('Model is loading')
        #     load_variables(model_file)
        #     logger.log('Loaded model from {}'.format(model_file))
        #     model_saved = True
        # elif load_path is not None:
        #     load_variables(load_path)
        #     logger.log('Loaded model from {}'.format(load_path))

        def synchronize_q_func_vars():
            updates_queue.put(
                UpdateMessage(UPDATE_STATUS_SEND_WEIGHTS, None, None))
            q_func_vars_trained = q_func_vars_trained_queue.get()
            update_q_func_expr = []
            for var, var_trained in zip(debug['q_func_vars'],
                                        q_func_vars_trained):
                update_q_func_expr.append(var.assign(var_trained))
            update_q_func_expr = tf.group(*update_q_func_expr)
            sess.run(update_q_func_expr)

        synchronize_q_func_vars()

        episode_rewards = []
        act_step_t = 0
        while act_step_t < total_timesteps:
            # Reset the environment
            obs = env.reset()
            obs = StatePreprocessor.process(obs)
            episode_rewards.append(0.0)
            done = False
            # Demo preservation variables
            demo_picked = 0
            demo_picked_step = 0
            # Demo switching statistics
            demo_switching_stats = [(0, 0)]
            # Sample the episode until it is completed
            act_started_step_t = act_step_t
            while not done:
                # Take action and update exploration to the newest value
                biases, demo_indexes = reward_shaper.get_action_potentials_with_indexes(
                    obs, act_step_t)
                update_eps = exploration.value(act_step_t)
                actions, is_randoms = act(np.array(obs)[None],
                                          biases,
                                          update_eps=update_eps)
                action, is_random = actions[0], is_randoms[0]
                if not is_random:
                    bias_demo = demo_indexes[action]
                    if bias_demo != demo_switching_stats[-1][1]:
                        demo_switching_stats.append(
                            (act_step_t - act_started_step_t, bias_demo))
                    if bias_demo != 0 and demo_picked == 0:
                        demo_picked = bias_demo
                        demo_picked_step = act_step_t + 1
                pairs = env.step(action)
                action, (new_obs, rew, done, _) = pairs[-1]
                logger.log(
                    f'{act_step_t}/{total_timesteps} obs {obs} action {action}'
                )

                # track stats on the real reward but learn from the normalized version
                episode_rewards[-1] += rew
                rew = np.sign(rew) * np.log(1 + np.abs(rew))
                new_obs = StatePreprocessor.process(new_obs)

                if len(new_obs) == 0:
                    done = True
                else:
                    transition = (obs, action, rew, new_obs, float(done),
                                  act_step_t)
                    obs = new_obs
                    act_step_t += 1
                    if act_step_t - demo_picked_step >= MIN_STEPS_TO_FOLLOW_DEMO_FOR:
                        demo_picked = 0
                    reward_shaper.set_demo_picked(act_step_t, demo_picked)
                    updates_queue.put(
                        UpdateMessage(UPDATE_STATUS_CONTINUE, transition,
                                      demo_picked))
            # Post episode logging
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="rewards",
                                 simple_value=episode_rewards[-1])
            ])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(
                value=[tf.Summary.Value(tag="eps", simple_value=update_eps)])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="episode_steps",
                                 simple_value=act_step_t - act_started_step_t)
            ])
            summary_writer.add_summary(summary, act_step_t)
            mean_5ep_reward = round(float(np.mean(episode_rewards[-5:])), 1)
            num_episodes = len(episode_rewards)
            if print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", act_step_t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 5 episode reward", mean_5ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(act_step_t)))
                logger.dump_tabular()
            # Wait for the learning to finish and synchronize
            synchronize_q_func_vars()
            # Record demo_switching_stats
            if num_episodes % 10 == 0:
                save_demo_switching_stats(demo_switching_stats, stats_dir,
                                          num_episodes)
            if checkpoint_freq is not None and num_episodes % checkpoint_freq == 0:
                # Periodically save the model
                rec_model_file = os.path.join(
                    td, "model_{}_{:.2f}".format(num_episodes,
                                                 mean_5ep_reward))
                save_variables(rec_model_file)
                # Check whether the model is the best so far
                if saved_mean_reward is None or mean_5ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_5ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_5ep_reward

        updates_queue.put(UpdateMessage(UPDATE_STATUS_FINISH, None, None))
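The learner process that consumes these messages is defined elsewhere; a rough sketch of the loop it presumably runs, assuming UpdateMessage exposes status / transition / demo_picked fields and that the learner owns the replay buffer, train, and update_target functions (all names below are illustrative):

import numpy as np

def do_learning_loop(updates_queue, q_func_vars_trained_queue, replay_buffer,
                     train, update_target, read_q_func_vars,
                     batch_size=32, train_freq=1, target_network_update_freq=500):
    t = 0
    while True:
        msg = updates_queue.get()
        if msg.status == UPDATE_STATUS_FINISH:
            break
        if msg.status == UPDATE_STATUS_SEND_WEIGHTS:
            # the actor is waiting for fresh weights before its next episode
            q_func_vars_trained_queue.put(read_q_func_vars())
            continue
        # UPDATE_STATUS_CONTINUE: store the transition and periodically train
        obs, action, rew, new_obs, done, _step_t = msg.transition
        replay_buffer.add(obs, action, rew, new_obs, done)
        t += 1
        if t % train_freq == 0 and len(replay_buffer) >= batch_size:
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
            train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))
        if t % target_network_update_freq == 0:
            update_target()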
Example #17
def learn(env,
          network,
          seed=None,
          pool=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_initial_eps=1.0,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=100,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          experiment_name='unnamed',
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    experiment_name: str
        name of the experiment (default: unnamed)
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=exploration_initial_eps,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    reward_shaper = ActionAdviceRewardShaper('../completed-observations')
    reward_shaper.load()

    full_exp_name = '{}-{}'.format(date.today().isoformat(), experiment_name)
    experiment_dir = os.path.join('experiments', full_exp_name)
    if not os.path.exists(experiment_dir):
        os.makedirs(experiment_dir)

    summary_dir = os.path.join(experiment_dir, 'summaries')
    os.makedirs(summary_dir, exist_ok=True)
    summary_writer = tf.summary.FileWriter(summary_dir)

    checkpoint_dir = os.path.join(experiment_dir, 'checkpoints')
    os.makedirs(checkpoint_dir, exist_ok=True)

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_dir or td

        os.makedirs(td, exist_ok=True)
        model_file = os.path.join(td, "best_model")
        model_saved = False
        saved_mean_reward = None

        if os.path.exists(model_file):
            print('Model is loading')
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episode_rewards = []
        update_step_t = 0
        while update_step_t < total_timesteps:
            # Reset the environment
            obs = env.reset()
            obs = StatePreprocessor.process(obs)
            episode_rewards.append(0.0)
            reset = True
            done = False
            # Sample the episode until it is completed
            act_step_t = update_step_t
            while not done:
                if callback is not None:
                    if callback(locals(), globals()):
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not param_noise:
                    update_eps = exploration.value(act_step_t)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(act_step_t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = -np.log(
                        1. - exploration.value(act_step_t) +
                        exploration.value(act_step_t) /
                        float(env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                biases = reward_shaper.get_action_potentials(obs)
                action = act(np.array(obs)[None],
                             biases,
                             update_eps=update_eps,
                             **kwargs)[0]
                reset = False

                pairs = env.step(action)
                action, (new_obs, rew, done, _) = pairs[-1]
                # Write down the real reward but learn from normalized version
                episode_rewards[-1] += rew
                rew = np.sign(rew) * np.log(1 + np.abs(rew))
                new_obs = StatePreprocessor.process(new_obs)

                logger.log('{}/{} obs {} action {}'.format(
                    act_step_t, total_timesteps, obs, action))
                act_step_t += 1
                if len(new_obs) == 0:
                    done = True
                else:
                    replay_buffer.add(obs, action, rew, new_obs, float(done))
                    obs = new_obs
            # Post episode logging
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="rewards",
                                 simple_value=episode_rewards[-1])
            ])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(
                value=[tf.Summary.Value(tag="eps", simple_value=update_eps)])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="episode_steps",
                                 simple_value=act_step_t - update_step_t)
            ])
            summary_writer.add_summary(summary, act_step_t)
            mean_5ep_reward = round(np.mean(episode_rewards[-5:]), 1)
            num_episodes = len(episode_rewards)
            if print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", act_step_t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 5 episode reward", mean_5ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(act_step_t)))
                logger.dump_tabular()
            # Do the learning
            start = time.time()
            while update_step_t < min(act_step_t, total_timesteps):
                if update_step_t % train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if prioritized_replay:
                        experience = replay_buffer.sample(
                            batch_size,
                            beta=beta_schedule.value(update_step_t))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                            batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    biases_t = pool.map(reward_shaper.get_action_potentials,
                                        obses_t)
                    biases_tp1 = pool.map(reward_shaper.get_action_potentials,
                                          obses_tp1)
                    td_errors, weighted_error = train(obses_t, biases_t,
                                                      actions, rewards,
                                                      obses_tp1, biases_tp1,
                                                      dones, weights)

                    # Loss logging
                    summary = tf.Summary(value=[
                        tf.Summary.Value(tag='weighted_error',
                                         simple_value=weighted_error)
                    ])
                    summary_writer.add_summary(summary, update_step_t)

                    if prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + prioritized_replay_eps
                        replay_buffer.update_priorities(
                            batch_idxes, new_priorities)
                if update_step_t % target_network_update_freq == 0:
                    # Update target network periodically.
                    update_target()
                update_step_t += 1
            stop = time.time()
            logger.log("Learning took {:.2f} seconds".format(stop - start))
            if checkpoint_freq is not None and num_episodes % checkpoint_freq == 0:
                # Periodically save the model and the replay buffer
                rec_model_file = os.path.join(
                    td, "model_{}_{:.2f}".format(num_episodes,
                                                 mean_5ep_reward))
                save_variables(rec_model_file)
                buffer_file = os.path.join(
                    td, "buffer_{}_{}".format(num_episodes, update_step_t))
                with open(buffer_file, 'wb') as foutput:
                    cloudpickle.dump(replay_buffer, foutput)
                # Check whether it is best
                if saved_mean_reward is None or mean_5ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_5ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_5ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
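
The example above logs the raw episode reward but trains on a compressed reward, rew = sign(rew) * log(1 + |rew|). A minimal standalone sketch of that transform (the reward values below are made up for illustration):

import numpy as np

def compress_reward(rew):
    # Symmetric log scaling: keeps the sign, shrinks large magnitudes,
    # and leaves rewards near zero almost unchanged (log1p(x) == log(1 + x)).
    return np.sign(rew) * np.log1p(np.abs(rew))

print(compress_reward(np.array([-100.0, -1.0, 0.0, 1.0, 100.0])))
# -> approximately [-4.615 -0.693  0.     0.693  4.615]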
Example No. 18
0
 def save(self, save_path):
     if save_path is not None:
         info('saving vars to ' + save_path)
         U.save_variables(save_path)
     else:
         info('save_path is None, not saving')
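
A minimal round-trip sketch of the save/load helpers used above (assuming baselines.common.tf_util.save_variables / load_variables; the path and variable are hypothetical):

import tensorflow as tf
from baselines.common.tf_util import save_variables, load_variables

with tf.Session() as sess:
    w = tf.get_variable("w", initializer=tf.constant([1.0, 2.0]))
    sess.run(tf.global_variables_initializer())
    save_variables("/tmp/demo_model", sess=sess)  # dump current variable values to disk
    sess.run(w.assign([0.0, 0.0]))                # clobber the in-memory value
    load_variables("/tmp/demo_model", sess=sess)  # restore the saved values
    print(sess.run(w))                            # -> [1. 2.]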
Example No. 19
0
 def save(self, path):
     save_variables(path)
Example No. 20
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          initial_exploration_p=1.0,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=100,
          prioritized_replay=True,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          pretraining_obs=None,
          pretraining_targets=None,
          pretrain_steps=1000,
          pretrain_experience=None,
          pretrain_num_episodes=0,
          double_q=True,
          expert_qfunc=None,
          aggrevate_steps=0,
          pretrain_lr=1e-4,
          sampling_starts=0,
          beb_agent=None,
          qvalue_file="qvalue.csv",
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    beb_agent: takes Q values and suggests actions after adding beb bonus
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    nenvs = env.num_envs
    print("Bayes-DeepQ:", env.num_envs)
    print("Total timesteps", total_timesteps)
    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, train_target, copy_target_to_q, debug = brl_deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        pretrain_optimizer=tf.train.AdamOptimizer(learning_rate=pretrain_lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        double_q=double_q)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=initial_exploration_p,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        print("Model will be saved at ", model_file)
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))
            print('Loaded model from {}'.format(load_path))

    if pretraining_obs is not None:
        # pretrain target and copy to qfunc
        print("Pretrain steps ", pretrain_steps)
        for i in range(pretrain_steps):
            pretrain_errors = train_target(pretraining_obs,
                                           pretraining_targets)
            if i % 500 == 0:
                print("Step {}".format(i), np.mean(pretrain_errors))
            if np.mean(pretrain_errors) < 1e-5:
                break

        min_rew = 0
        # copy all pre-experiences
        if pretrain_experience is not None:
            for obs, action, rew, new_obs, done in zip(*pretrain_experience):
                replay_buffer.add(obs, action, rew, new_obs, float(done))
            print("Added {} samples to ReplayBuffer".format(
                len(replay_buffer._storage)))
            min_rew = min(rew, min_rew)
        print("Pretrain Error", np.mean(pretrain_errors))
    else:
        print("Skipping pretraining")

    update_target()
    print("Save the pretrained model", model_file)
    save_variables(model_file)

    episode_reward = np.zeros(nenvs, dtype=np.float32)
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_episodes = 0
    episode_rewards_history = deque(maxlen=100)
    episode_step = np.zeros(nenvs, dtype=int)
    episodes = 0  #scalar

    start = 0

    if expert_qfunc is None:
        aggrevate_steps = 0

    # if pretraining_obs is None or pretraining_obs.size == 0:
    #     episode_rewards = []
    # else:
    #     episode_rewards = [[0.0]] * pretrain_num_episodes
    #     start = len(pretraining_obs)
    #     if print_freq is not None:
    #         for t in range(0, len(pretraining_obs), print_freq):
    #             logger.record_tabular("steps", t)
    #             logger.record_tabular("episodes", pretrain_num_episodes)
    #             logger.record_tabular("mean 100 episode reward", min_rew)
    #             logger.record_tabular("% time spent exploring", 0)
    #             logger.dump_tabular()
    #             print("pretraining episodes", pretrain_num_episodes, "steps {}/{}".format(t, total_timesteps))

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        print("Aggrevate: Model will be saved at ", model_file)
        model_saved = False

        for i in range(aggrevate_steps):
            obses_t, values = [], []
            for j in range(30):
                # TODO: 30 should be changed to max horizon?
                t = np.random.randint(50) + 1

                obs = env.reset()
                for k in range(t):
                    action, value = act(np.array(obs)[None],
                                        update_eps=exploration.value(i))
                    obs, rew, done, _ = env.step(action)

                obses_t.extend(obs)
                # Roll out expert policy
                episode_reward[:] = 0
                dones = np.array([False] * obs.shape[0])
                for k in range(51 - t):
                    obs, rew, done, _ = env.step(
                        [expert_qfunc.step(o) for o in obs])
                    dones[done] = True
                    rew[dones] = 0
                    episode_reward += 0.95**k * rew

                # TODO: change this to exploration-savvy action
                # action = np.random.randint(env.action_space.n, size=len(obs))
                # Rocksample specific, take sensing actions
                # prob = np.array([1] * 6 + [2] * (env.action_space.n - 6), dtype=np.float32)
                # prob = prob / np.sum(prob)
                # action = np.random.choice(env.action_space.n, p=prob, size=len(action))
                # new_obs, rew, done, _ = env.step(action)

                # value = rew.copy()
                # value[np.logical_not(done)] += gamma * np.max(expert_qfunc.value(new_obs[np.logical_not(done)]), axis=1)
                # current_value[tuple(np.array([np.arange(len(action)), action]))] = value

                # episode reward
                # episode_reward[np.logical_not(done)] += np.max(current_value[np.logical_not(done)], axis=1)
                # episode_rewards_history.extend(np.max(current_value, axis=1))
                value[tuple([np.arange(len(action)), action])] = episode_reward
                values.extend(value)

            print("Aggrevate got {} / {} new data".format(
                obs.shape[0] * 30, len(obses_t)))
            # print("Mean expected cost at the explored points", np.mean(np.max(values, axis=1)))
            for j in range(1000):
                obs, val = np.array(obses_t), np.array(values)
                # indices = np.random.choice(len(obs), min(1000, len(obses_t)))
                aggrevate_errors = train_target(obs, val)
                if np.mean(aggrevate_errors) < 1e-5:
                    print("Aggrevate Step {}, {}".format(i, j),
                          np.mean(aggrevate_errors))
                    break
            print("Aggrevate Step {}, {}".format(i, j),
                  np.mean(aggrevate_errors))
            update_target()
            print("Save the aggrevate model", i, model_file)

            # Evaluate current policy
            episode_reward[:] = 0
            obs = env.reset()
            num_episodes = 0
            k = np.zeros(len(obs))
            while num_episodes < 100:
                action, _ = act(np.array(obs)[None], update_eps=0.0)
                # print(action)
                obs, rew, done, _ = env.step(action)
                episode_reward += 0.95**k * rew
                k += 1
                for d in range(len(done)):
                    if done[d]:
                        episode_rewards_history.append(episode_reward[d])
                        episode_reward[d] = 0.
                        k[d] = 0
                        num_episodes += 1
            mean_1000ep_reward = round(np.mean(episode_rewards_history), 2)
            print("Mean discounted reward", mean_1000ep_reward)
            logger.record_tabular("mean 100 episode reward",
                                  mean_1000ep_reward)
            logger.dump_tabular()
            save_variables(model_file)

        t = 0  # could start from pretrain-steps
        epoch = 0
        while True:
            epoch += 1
            if t >= total_timesteps:
                break

            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True

            # no randomization
            # update_eps = 0
            print('update_eps', int(100 * exploration.value(t)))
            qv_error = []

            obs = env.reset()
            for m in range(100):

                action, q_values = act(np.array(obs)[None],
                                       update_eps=update_eps,
                                       **kwargs)
                if beb_agent is not None:
                    action = beb_agent.step(obs, action, q_values,
                                            exploration.value(t))
                # if expert_qfunc is not None:
                #     v = expert_qfunc.value(obs)
                #     qv_error += [v - q_values[0]]

                env_action = action
                reset = False
                new_obs, rew, done, info = env.step(env_action)

                if t >= sampling_starts:
                    # Store transition in the replay buffer.
                    replay_buffer.add(obs, action, rew, new_obs, done)
                obs = new_obs

                episode_reward += rew
                episode_step += 1

                for d in range(len(done)):
                    if done[d]:
                        # Episode done.

                        # discount(np.array(rewards), gamma) consider doing discount
                        epoch_episode_rewards.append(episode_reward[d])
                        episode_rewards_history.append(episode_reward[d])
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1

            t += 100 * nenvs

            if t > learning_starts:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if target_network_update_freq is not None and t > sampling_starts \
                and epoch % target_network_update_freq == 0:
                # Update target network periodically.
                print("Update target")
                update_target()

            mean_1000ep_reward = round(np.mean(episode_rewards_history), 2)
            num_episodes = episodes

            if print_freq is not None:
                logger.record_tabular("steps", t)
                logger.record_tabular("td errors", np.mean(td_errors))
                logger.record_tabular("td errors std",
                                      np.std(np.abs(td_errors)))
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 1000 episode reward",
                                      mean_1000ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()
                print("episodes", num_episodes,
                      "steps {}/{}".format(t, total_timesteps))

            if (checkpoint_freq is not None and t > learning_starts
                    and len(episode_rewards_history) >= 1000):
                if saved_mean_reward is None or mean_1000ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_1000ep_reward))
                        print("saving model")
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_1000ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
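
Several loops in the example above accumulate a discounted return incrementally as episode_reward += 0.95**k * rew. A small self-contained sketch of the same bookkeeping (the rewards below are made up):

def discounted_return(rewards, gamma=0.95):
    # Incremental form of sum_k gamma**k * r_k, matching the loops above.
    total = 0.0
    for k, rew in enumerate(rewards):
        total += gamma ** k * rew
    return total

print(discounted_return([0.0, 0.0, 1.0, 0.0, 10.0]))  # -> about 9.05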
Example No. 21
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.5,
          initial_exploration_p=1.0,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1,
          gamma=1.0,
          target_network_update_freq=40000,#10000,
          prioritized_replay=True,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          double_q=True,
          obs_dim=None,
          qmdp_expert=None,
          **network_kwargs
            ):
    """Train a bootstrap-dqn model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    qmdp_expert: takes obs, bel -> returns qmdp q-vals
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    nenvs = env.num_envs
    print("Bootstrap DQN with {} envs".format(nenvs))

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    # import IPython; IPython.embed()
    #assert isinstance(env.envs[0].env.env.env, ExplicitBayesEnv)
    #belief_space = env.envs[0].env.env.env.belief_space
    #observation_space = env.envs[0].env.env.env.internal_observation_space

    obs_space = env.observation_space

    assert obs_dim is not None

    observation_space = Box(obs_space.low[:obs_dim], obs_space.high[:obs_dim], dtype=np.float32)
    belief_space = Box(obs_space.low[obs_dim:], obs_space.high[obs_dim:], dtype=np.float32)

    num_experts = belief_space.high.size

    # print("Num experts", num_experts)

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    def make_bel_ph(name):
        return ObservationInput(belief_space, name=name)

    q_func = build_q_func(network, num_experts, **network_kwargs)

    print('=============== got qfunc ============== ')

    act, train, update_target, debug = residual_bqn_fixed_expert.build_train(
        make_obs_ph=make_obs_ph,
        make_bel_ph=make_bel_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        double_q=double_q
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=initial_exploration_p,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_reward = np.zeros(nenvs, dtype = np.float32)
    saved_mean_reward = None
    reset = True
    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_episodes = 0
    episode_rewards_history = deque(maxlen=1000)
    episode_step = np.zeros(nenvs, dtype = int)
    episodes = 0 #scalar


    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        print("Model will be saved at " , model_file)
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))
            print('Loaded model from {}'.format(load_path))

        t = 0
        while t < total_timesteps:
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            update_eps = exploration.value(t)
            update_param_noise_threshold = 0.

            obs = env.reset()
            episode_reward = np.zeros(nenvs, dtype = np.float32)
            episode_step[:] = 0
            obs, bel = obs[:, :-belief_space.shape[0]], obs[:, -belief_space.shape[0]:]

            expert_qval = qmdp_expert(obs, bel)

            start_time = timer.time()
            horizon = 100
            for m in range(horizon):
                action, q_values = act(np.array(obs)[None], np.array(bel)[None], np.array(expert_qval)[None], update_eps=update_eps, **kwargs)
                env_action = action

                new_obs, rew, done, info = env.step(env_action)
                new_obs, new_bel = new_obs[:, :-belief_space.shape[0]], new_obs[:, -belief_space.shape[0]:]

                new_expert_qval = qmdp_expert(new_obs, new_bel)

                # Store transition in the replay buffer.
                replay_buffer.add(obs, bel, expert_qval, action, rew, new_obs, new_bel, new_expert_qval, done)

                # if np.random.rand() < 0.05:
                # #     # write to file
                # #     with open('rbqn_fixed_expert.csv', 'a') as f:
                # #         out = ','.join(str(np.around(x,2)) for x in [bel[0], obs[0], q_values[0]])
                #         # f.write(out + "\n")

                #     print(np.around(bel[-1], 2), rew[-1], np.around(q_values[-1], 1), np.around(expert_qval[-1], 1))

                obs = new_obs
                bel = new_bel
                expert_qval = new_expert_qval

                episode_reward += 0.95 ** episode_step * rew
                episode_step += 1

                # print(action, done, obs)

                for d in range(len(done)):
                    if done[d]:
                        epoch_episode_rewards.append(episode_reward[d])
                        episode_rewards_history.append(episode_reward[d])
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1

            # import IPython;IPython.embed(); #import sys; sys.exit(0)

            print("Took {}".format(timer.time() - start_time))

            t += horizon * nenvs

            num_experts = 16
            if t > learning_starts and t % train_freq == 0:
                for _ in range(num_experts):
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if prioritized_replay:
                        experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                        if experience is None:
                            continue
                        obses_t, bels_t, expert_qvals, actions, rewards, obses_tp1, bels_tp1, expert_qvals_1, dones, weights, batch_idxes = experience
                    else:
                        experience = replay_buffer.sample(batch_size)
                        if experience is None:
                            continue

                        obses_t, bels_t, expert_qvals, actions, rewards, obses_tp1, bels_tp1, expert_qvals_1, dones = experience
                        weights, batch_idxes = np.ones_like(rewards), None
                    td_errors = train(obses_t, bels_t, expert_qvals, actions, rewards, obses_tp1, bels_tp1, expert_qvals_1, dones, weights)

                    if np.random.rand() < 0.01:
                        print("TD error", np.around(td_errors, 1))

                    if prioritized_replay:
                        new_priorities = np.abs(td_errors) + prioritized_replay_eps
                        replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                print("Update target")
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards_history), 2)
            num_episodes = episodes

            if print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 1000 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()
                print("episodes   ", num_episodes, "steps {}/{}".format(t, total_timesteps))
                print("mean reward", mean_100ep_reward)
                print("exploration",  int(100 * exploration.value(t)))

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                        print("saving model")
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
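
The example above concatenates the environment observation and the belief into a single vector and slices them apart on every step. A minimal sketch of that split with hypothetical sizes (obs_dim=4, a belief over 3 experts, 2 vectorized envs):

import numpy as np

obs_dim, belief_dim, nenvs = 4, 3, 2
packed = np.arange(nenvs * (obs_dim + belief_dim), dtype=np.float32).reshape(nenvs, -1)

# Same slicing as in the loop above: the belief occupies the trailing columns.
obs, bel = packed[:, :-belief_dim], packed[:, -belief_dim:]
assert obs.shape == (nenvs, obs_dim) and bel.shape == (nenvs, belief_dim)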
Example No. 22
0
 def save_policy(self, name):
     U.save_variables(name, variables=self.get_variables())
Example No. 23
0
def learn(env,
          policy_func,
          dataset,
          optim_batch_size=100,
          max_iters=1e4,
          adam_epsilon=1e-5,
          optim_stepsize=1e-3,
          ckpt_dir=None,
          log_dir=None,
          task_name=None,
          verbose=False):

    val_per_iter = int(max_iters / 10)
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space,
                     ac_space)  # Construct network for new policy
    dof = 5
    # placeholder
    ob_config = U.get_placeholder_cached(name="ob")
    ob_target = U.get_placeholder_cached(name="goal")
    obs_pos = U.get_placeholder_cached(name="obs_pos")
    obs_ori = U.get_placeholder_cached(name="obs_ori")
    ac = pi.pdtype.sample_placeholder([None])

    stochastic = U.get_placeholder_cached(name="stochastic")
    loss = tf.reduce_mean(tf.square(ac - pi.ac))
    #loss = tf.reduce_mean(pi.pd.neglogp(ac))
    #var_list = pi.get_trainable_variables()

    all_var_list = pi.get_variables()
    var_list = [
        v for v in all_var_list if v.name.startswith("pi/pol")
        or v.name.startswith("pi/logstd") or v.name.startswith("pi/obs")
    ]
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function(
        [ob_config, ob_target, obs_pos, obs_ori, ac, stochastic],
        [loss] + [U.flatgrad(loss, var_list)])

    U.initialize()
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        savedir_fname = osp.join(ckpt_dir, 'model')
    if osp.exists(savedir_fname):
        try:
            U.load_variables(savedir_fname, pi.get_variables())
        except Exception:
            print(
                "size of the pretrained model does not match the current model"
            )
    adam.sync()
    logger.log("Pretraining with Behavior Cloning...")
    thresh = 0.1
    for iter_so_far in tqdm(range(int(max_iters))):
        ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size,
                                                      'train')
        tar = ob_expert[:, dof:2 * dof]
        cur = ob_expert[:, :dof]
        avo = np.zeros_like(cur)
        for i in range(len(avo)):
            avo[i] = ac_expert[i] - thresh * (
                tar[i] - cur[i]) / np.linalg.norm(tar[i] - cur[i])
        # avo = ac_expert - thresh * (tar - cur) / np.linalg.norm(tar - cur)
        train_loss, g = lossandgrad(cur, tar, ob_expert[:, -6:-3],
                                    ob_expert[:, -3:], avo, True)
        adam.update(g, optim_stepsize)
        if verbose and iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            tar = ob_expert[:, dof:2 * dof]
            cur = ob_expert[:, :dof]
            avo = np.zeros_like(cur)
            for i in range(len(avo)):
                avo[i] = ac_expert[i] - thresh * (
                    tar[i] - cur[i]) / np.linalg.norm(tar[i] - cur[i])
            val_loss, _ = lossandgrad(cur, tar, ob_expert[:, -6:-3],
                                      ob_expert[:, -3:], avo, True)
            logger.log("Training loss: {}, Validation loss: {}".format(
                np.rad2deg(np.sqrt(train_loss)),
                np.rad2deg(np.sqrt(val_loss))))

    U.save_variables(savedir_fname, variables=pi.get_variables())
    return savedir_fname
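
The per-sample loop above subtracts a step of length thresh toward the target from each expert action; note that the commented-out one-liner would divide by the norm of the whole (tar - cur) matrix rather than by each row's norm. A vectorized sketch that matches the loop, with made-up array shapes:

import numpy as np

thresh = 0.1
tar = np.random.randn(8, 5)        # hypothetical target joint configurations
cur = np.random.randn(8, 5)        # hypothetical current joint configurations
ac_expert = np.random.randn(8, 5)  # hypothetical expert actions

diff = tar - cur
# axis=1, keepdims=True gives one norm per row, so each sample keeps its own scale.
avo = ac_expert - thresh * diff / np.linalg.norm(diff, axis=1, keepdims=True)
assert avo.shape == ac_expert.shape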
Example No. 24
0
def learn(env,
          policy_func,
          dataset,
          optim_batch_size=256,
          max_iters=5e3,
          adam_epsilon=1e-7,
          optim_stepsize=1e-4,
          ckpt_dir=None,
          log_dir=None,
          task_name=None,
          verbose=False):

    val_per_iter = int(max_iters / 10)
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi", ob_space,
                     ac_space)  # Construct network for new policy
    dof = 5
    # placeholder
    ob_config = U.get_placeholder_cached(name="ob")
    ob_target = U.get_placeholder_cached(name="goal")
    obs_pos = U.get_placeholder_cached(name="obs_pos")
    obs_ori = U.get_placeholder_cached(name="obs_ori")
    ac = pi.pdtype.sample_placeholder([None])

    stochastic = U.get_placeholder_cached(name="stochastic")
    loss = tf.reduce_mean(tf.square(ac - pi.ac))
    # loss = tf.reduce_mean(pi.pd.neglogp(ac))

    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list if v.name.startswith("pi/pol")
        or v.name.startswith("pi/logstd") or v.name.startswith("pi/obs")
    ]
    AdamOp = tf.train.AdamOptimizer(learning_rate=optim_stepsize,
                                    epsilon=adam_epsilon).minimize(
                                        loss, var_list=var_list)

    U.initialize()
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        savedir_fname = osp.join(ckpt_dir, 'model')
    if osp.exists(savedir_fname):
        try:
            U.load_variables(savedir_fname, pi.get_variables())
        except Exception:
            print(
                "size of the pretrained model does not match the current model"
            )

    logger.log("Pretraining with Behavior Cloning...")
    thresh = 0.1
    for iter_so_far in tqdm(range(int(max_iters))):
        ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size,
                                                      'train')
        tar = ob_expert[:, dof:2 * dof]
        cur = ob_expert[:, :dof]
        avo = np.zeros_like(cur)
        for i in range(len(avo)):
            avo[i] = ac_expert[i] - thresh * (
                tar[i] - cur[i]) / np.linalg.norm(tar[i] - cur[i])
        # avo = ac_expert - thresh * (tar - cur) / np.linalg.norm(tar - cur)

        U.get_session().run(AdamOp,
                            feed_dict={
                                ob_config: cur,
                                ob_target: tar,
                                obs_pos: ob_expert[:, -6:-3],
                                obs_ori: ob_expert[:, -3:],
                                ac: avo,
                                stochastic: True
                            })

        if verbose and iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            tar = ob_expert[:, dof:2 * dof]
            cur = ob_expert[:, :dof]
            avo = np.zeros_like(cur)
            for i in range(len(avo)):
                avo[i] = ac_expert[i] - thresh * (
                    tar[i] - cur[i]) / np.linalg.norm(tar[i] - cur[i])
            val_loss = U.get_session().run(loss,
                                           feed_dict={
                                               ob_config: cur,
                                               ob_target: tar,
                                               obs_pos: ob_expert[:, -6:-3],
                                               obs_ori: ob_expert[:, -3:],
                                               ac: avo,
                                               stochastic: True
                                           })
            logger.log("Validation loss: {}".format(
                np.rad2deg(np.sqrt(val_loss))))
    allvar = pi.get_variables()
    savevar = [v for v in allvar if "Adam" not in v.name]
    U.save_variables(savedir_fname, variables=savevar)
    return savedir_fname
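
The save at the end of this example drops optimizer state by filtering on the variable name. A small sketch of why that works (the variable and loss below are hypothetical): tf.train.AdamOptimizer.minimize adds slot variables whose names contain "Adam", so the name filter keeps only the model weights.

import tensorflow as tf

w = tf.get_variable("w", shape=[2], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, var_list=[w])

all_vars = tf.global_variables()
save_vars = [v for v in all_vars if "Adam" not in v.name]
print([v.name for v in all_vars])   # includes slot variables such as 'w/Adam:0' and 'w/Adam_1:0'
print([v.name for v in save_vars])  # the Adam slot variables are filtered out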
Example No. 25
0
 def save(self, save_path):
     U.save_variables(save_path, None, self.sess)
Example No. 26
0
def run_main(opts):
    if os.path.exists(opts.model_dir):
        print('Path already exists. Remove? y for yes')
        input_char = getch.getch()
        if not input_char == 'y':
            print('Exiting')
            return
        shutil.rmtree(opts.model_dir)
    os.makedirs(opts.model_dir)
    os.makedirs(os.path.join(opts.model_dir, 'logs'))
    os.makedirs(os.path.join(opts.model_dir, 'weights'))

    # Create the environment with specified arguments
    state_data, action_data = process_data(opts.bc_data)

    #env = gym.make('MountainCar-v0')
    env = gym.make('LunarLander-v2')
    env._max_episode_steps = 1200

    x, model, logits = create_model()
    train, loss, labels = create_training(logits)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Create summaries
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(os.path.join(opts.model_dir, 'logs'),
                                         sess.graph)

    print(tf.global_variables())
    sess.run(tf.global_variables_initializer())

    update = 0
    save_freq = 1000
    while True:
        for _ in range(25):
            # Get a random batch from the data
            batch_index = np.random.choice(len(state_data), 64)  #Batch size
            state_batch, action_batch = state_data[batch_index], action_data[
                batch_index]

            # Train the model on the sampled minibatch.
            _, cur_loss, cur_summaries = sess.run([train, loss, merged],
                                                  feed_dict={
                                                      x: state_batch,
                                                      labels: action_batch
                                                  })
            print("Loss: {}".format(cur_loss))
            train_writer.add_summary(cur_summaries, update)
            update += 1

        if update % save_freq == 0:
            save_variables(os.path.join(opts.model_dir, 'weights',
                                        opts.model_weights),
                           sess=sess)

        done = False
        obs = env.reset()
        rewards = 0
        action_freq = [0 for _ in range(4)]
        while not done:
            env.render()

            # Handle the toggling of different application states
            action = sess.run(model, feed_dict={x: [obs.flatten()]})[0]

            action_freq[action] += 1

            obs, reward, done, info = env.step(action)
            rewards += reward

        reward_summary = tf.Summary(
            value=[tf.Summary.Value(tag='reward', simple_value=rewards)])
        act_summary = tf.Summary(value=[
            tf.Summary.Value(tag='act_distribution',
                             histo=log_histogram(action_freq, 1, bins=4))
        ])
        train_writer.add_summary(reward_summary, update // 25)
        train_writer.add_summary(act_summary, update // 25)

        print("Num updates: {}".format(update))
        print("Total reward: {}".format(rewards))
        print("Action dict: {}".format(action_freq))
Example No. 27
0
 def save(self, path):
     U.save_variables(path, sess=self.sess)
Example No. 28
0
def train_dqn(opts,
              seed=None,
              lr=1e-3,
              total_timesteps=500000,
              buffer_size=50000,
              exploration_fraction=0.1,
              exploration_final_eps=0.02,
              train_freq=1,
              batch_size=32,
              checkpoint_freq=500000,
              learning_starts=1000,
              gamma=1.000,
              target_network_update_freq=3000,
              load_path=None):
    """
    Runs the main recorder by binding certain discrete actions to keys.
    """
    if os.path.exists(opts.model_dir):
        print('Path already exists. Remove? y for yes')
        input_char = getch.getch()
        if not input_char == 'y':
            print('Exiting')
            return
        shutil.rmtree(opts.model_dir)
    os.makedirs(opts.model_dir)
    os.makedirs(os.path.join(opts.model_dir, 'logs'))
    os.makedirs(os.path.join(opts.model_dir, 'weights'))

    #env = gym.make('MountainCar-v0')
    env = gym.make('LunarLander-v2')
    env._max_episode_steps = 1200

    sess = get_session()
    set_global_seeds(seed)

    train_writer = tf.summary.FileWriter(os.path.join(opts.model_dir, 'logs'),
                                         sess.graph)

    q_func = build_q_func('mlp')

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10)
    replay_buffer = ReplayBuffer(buffer_size)

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    obs = env.reset()

    for t in range(total_timesteps):
        # Take action and update exploration to the newest value
        env.render()
        update_eps = exploration.value(t)
        action = act(np.array(obs)[None], update_eps=update_eps)[0]
        new_obs, rew, done, _ = env.step(action)
        # Store transition in the replay buffer.
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = new_obs

        episode_rewards[-1] += rew
        if done:
            print("Exploration value: {}".format(exploration.value(t)))
            print("Last 25 episode rewards: {}".format(episode_rewards[-25:]))

            reward_summary = tf.Summary(value=[
                tf.Summary.Value(tag='reward',
                                 simple_value=episode_rewards[-1])
            ])
            train_writer.add_summary(reward_summary, t)

            obs = env.reset()
            episode_rewards.append(0.0)

        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                batch_size)
            weights, batch_idxes = np.ones_like(rewards), None
            td_errors, summary = train(obses_t, actions, rewards, obses_tp1,
                                       dones, weights)
            train_writer.add_summary(summary, t)

        if t > learning_starts and t % target_network_update_freq == 0:
            # Update target network periodically.
            update_target()

        if t > learning_starts and t % checkpoint_freq == 0:
            save_variables(
                os.path.join(opts.model_dir, 'weights', '{}.model'.format(t)))
    save_variables(os.path.join(opts.model_dir, 'weights', 'last.model'))
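For reference, a minimal driver for train_dqn might look like the sketch below; the opts namespace only needs the model_dir attribute used above, and the argument values are illustrative:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', default='./lunarlander_dqn',
                        help='directory for TensorBoard logs and weight checkpoints')
    opts = parser.parse_args()
    train_dqn(opts, seed=0, total_timesteps=200000, checkpoint_freq=50000)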
Ejemplo n.º 29
0
 def save_variables(self, save_path):
     tf_util.save_variables(save_path, sess=self.sess)
Ejemplo n.º 30
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          debug_flag=False,
          dpsr_replay=False,
          dpsr_replay_alpha1=0.6,
          dpsr_replay_alpha2=0.6,
          dpsr_replay_candidates_size=5,
          dpsr_common_replacement_candidates_number=128,
          dpsr_replay_beta_iters=None,
          dpsr_replay_beta0=0.4,
          dpsr_replay_eps=1e-6,
          dpsr_state_recycle_max_priority_set=True,
          state_recycle_freq=500,
          param_noise=False,
          callback=None,
          load_path=None,
          atari_env=True,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    checkpoint_path: str
        the saving path of the checkpoint files
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    debug_flag: bool
        if True DEBUG mode will be switched on
    dpsr_replay: bool
        if True DPSR replay buffer will be used
    dpsr_replay_alpha1: float
        alpha1 parameter for DPSR replay buffer
    dpsr_replay_alpha2: float
        alpha2 parameter for DPSR replay buffer
    dpsr_replay_candidates_size: int
        number of candidate transitions considered for DPSR state recycling
    dpsr_common_replacement_candidates_number: int
        number of candidate transitions considered for DPSR common replacement
    dpsr_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    dpsr_replay_beta0: float
        initial value of beta for the DPSR replay buffer
    dpsr_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    dpsr_state_recycle_max_priority_set: bool
        if True priority will be set as MAX when doing state recycling
    state_recycle_freq: int
        do state recycling every 'state_recycle_freq' steps
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    atari_env: bool
        if True the env is an atari env
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    elif dpsr_replay:
        replay_buffer = DoublePrioritizedStateRecycledReplayBuffer(
            buffer_size,
            alpha1=dpsr_replay_alpha1,
            alpha2=dpsr_replay_alpha2,
            candidates_size=dpsr_replay_candidates_size,
            # Not Used: env_id=env.env.spec.id
            env_id=None)
        if dpsr_replay_beta_iters is None:
            dpsr_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(dpsr_replay_beta_iters,
                                       initial_p=dpsr_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps,
                         **kwargs)[0]
            env_action = action
            reset = False
            env_clone_state = None
            if dpsr_replay:
                env_clone_state = env.clone_state() if atari_env \
                    else copy.deepcopy(env.envs[0].env)
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            if dpsr_replay:
                if replay_buffer.not_full():
                    replay_buffer.add(obs, action, rew, new_obs, float(done),
                                      env_clone_state, t)
                elif state_recycle_freq and t % state_recycle_freq == 0:
                    current_env_copy = None
                    if not atari_env:
                        current_env_copy = copy.deepcopy(env.envs[0].env)
                    candidates_idxes, candidates = replay_buffer.replacement_candidates(
                    )
                    candidates_recycled = []
                    for candidate in candidates:
                        cand_obs, cand_old_act, *_, cand_state, cand_t = candidate
                        if atari_env:
                            new_env = copy.deepcopy(env)
                            new_env.reset()
                            new_env.restore_state(cand_state)
                        else:
                            env.envs[0].env = cand_state
                        new_action_cand = act(np.array(cand_obs)[None],
                                              update_eps=0.0,
                                              **kwargs)[0]
                        # make sure that a new experience is made
                        if new_action_cand != cand_old_act:
                            new_action = new_action_cand
                        else:
                            while True:
                                new_action_cand = env.action_space.sample()
                                if new_action_cand != cand_old_act:
                                    new_action = new_action_cand
                                    break
                        if atari_env:
                            new_new_obs, new_rew, new_done, _ = new_env.step(
                                new_action)
                        else:
                            new_new_obs, new_rew, new_done, _ = env.step(
                                new_action)
                        new_data = (cand_obs, new_action, new_rew, new_new_obs,
                                    new_done, cand_state, t)
                        candidates_recycled.append(new_data)
                    # get the new TDEs after recycling
                    cand_obses = np.array(
                        [data[0] for data in candidates_recycled])
                    cand_acts = np.array(
                        [data[1] for data in candidates_recycled])
                    cand_rews = np.array(
                        [data[2] for data in candidates_recycled])
                    cand_new_obses = np.array(
                        [data[3] for data in candidates_recycled])
                    cand_dones = np.array(
                        [data[4] for data in candidates_recycled])
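                    # Zero importance weights: this train() call appears to be used only to
                    # read back fresh TD errors for the recycled candidates, not to learn from them.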
                    cand_weights = np.zeros_like(cand_rews)
                    cand_td_errors = train(cand_obses, cand_acts, cand_rews,
                                           cand_new_obses, cand_dones,
                                           cand_weights)
                    new_cand_priorities = np.abs(
                        cand_td_errors) + dpsr_replay_eps
                    replay_buffer.update_priorities(candidates_idxes,
                                                    new_cand_priorities)
                    replay_buffer.state_recycle(
                        candidates_idxes, candidates_recycled, cand_td_errors,
                        dpsr_state_recycle_max_priority_set)
                    replay_buffer.add(obs, action, rew, new_obs, float(done),
                                      env_clone_state, t)
                    if not atari_env:
                        env.envs[0].env = current_env_copy
                else:
                    # common_replacement_candidates_number = 128
                    candidates_idxes, candidates = replay_buffer.replacement_candidates(
                        dpsr_common_replacement_candidates_number)
                    cand_timestamps = [
                        candidate[-1] for candidate in candidates
                    ]
                    replace_idx = candidates_idxes[np.argmin(cand_timestamps)]
                    replay_buffer.add(obs, action, rew, new_obs, float(done),
                                      env_clone_state, t, replace_idx)
            else:
                replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                elif dpsr_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, env_states,
                     timestamps, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)
                elif dpsr_replay:
                    new_priorities = np.abs(td_errors) + dpsr_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = np.round(np.mean(episode_rewards[-101:-1]), 1)
            mean_10ep_reward = np.round(np.mean(episode_rewards[-11:-1]), 1)
            mean_5ep_reward = np.round(np.mean(episode_rewards[-6:-1]), 1)
            last_1ep_reward = np.round(np.mean(episode_rewards[-2:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("mean 10 episode reward",
                                      mean_10ep_reward)
                logger.record_tabular("mean 5 episode reward", mean_5ep_reward)
                logger.record_tabular("last episode reward", last_1ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
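A hypothetical call to this DPSR variant is sketched below; the env and hyper-parameter values are illustrative, and the environment is assumed to expose the clone_state()/restore_state() methods the training loop relies on:

import gym

env = gym.make('PongNoFrameskip-v4')  # would normally be wrapped (frame stacking, etc.)
act = learn(env,
            network='conv_only',
            lr=1e-4,
            total_timesteps=1000000,
            dpsr_replay=True,
            dpsr_replay_alpha1=0.6,
            dpsr_replay_alpha2=0.6,
            state_recycle_freq=500,
            atari_env=True)
act.save('pong_dpsr.pkl')  # assumes the baselines ActWrapper save() helper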
Ejemplo n.º 31
0
            obs = np.array(
                [[np.cos(row.obnow1),
                  np.sin(row.obnow1), row.obnow2]])
            action = np.array([[row.action]])
            r = np.array([[row.reward]])
            new_obs = np.array([[row.obnext1, row.obnext2, row.obnext3]])
            agent.store_transition(obs, action, r, new_obs, np.zeros_like(r))
    # training
    for t_train in range(t_train_time):
        cl, al = agent.train()
        epoch_critic_losses.append(cl)
        epoch_actor_losses.append(al)
        print('step {}, critic_loss: {}, actor_loss: {}'.format(
            t_train, cl, al))
    save_variables('ddpg_model')
else:
    load_variables(load_path)

# plt.figure(1)
# plt.plot(epoch_critic_losses)
# plt.plot(epoch_actor_losses)
# plt.show()

env = gym.make('Pendulum-v0')
obs = env.reset()
for time in range(t_test_time):
    action, q, _, _ = agent.step(obs, apply_noise=False, compute_Q=True)
    s_, r, done, _ = env.step(action)
    obs = s_
    env.render()
Ejemplo n.º 32
0
def learn(env,
          use_ddpg=False,
          gamma=0.9,
          use_rs=False,
          controller_kargs={},
          option_kargs={},
          seed=None,
          total_timesteps=100000,
          print_freq=100,
          callback=None,
          checkpoint_path=None,
          checkpoint_freq=10000,
          load_path=None,
          **others):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    use_ddpg: bool
        whether to use DDPG or DQN to learn the option's policies
    gamma: float
        discount factor
    use_rs: bool
        use reward shaping
    controller_kargs
        arguments for learning the controller policy.
    option_kargs
        arguments for learning the option policies.
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    total_timesteps: int
        number of env steps to optimize for
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    load_path: str
        path to load the model from. (default: None)

    Returns
    -------
    act: ActWrapper (meta-controller)
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    act: ActWrapper (option policies)
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    controller = ControllerDQN(env, **controller_kargs)
    if use_ddpg:
        options = OptionDDPG(env, gamma, total_timesteps, **option_kargs)
    else:
        options = OptionDQN(env, gamma, total_timesteps, **option_kargs)
    option_s = None  # State where the option initiated
    option_id = None  # Id of the current option being executed
    option_rews = []  # Rewards obtained by the current option

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    options.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break

            # Selecting an option if needed
            if option_id is None:
                valid_options = env.get_valid_options()
                option_s = obs
                option_id = controller.get_action(option_s, valid_options)
                option_rews = []

            # Take action and update exploration to the newest value
            action = options.get_action(env.get_option_observation(option_id),
                                        t, reset)
            reset = False
            new_obs, rew, done, info = env.step(action)

            # Saving the real reward that the option is getting
            if use_rs:
                option_rews.append(info["rs-reward"])
            else:
                option_rews.append(rew)

            # Store transition for the option policies
            for _s, _a, _r, _sn, _done in env.get_experience():
                options.add_experience(_s, _a, _r, _sn, _done)

            # Learn and update the target networks if needed for the option policies
            options.learn(t)
            options.update_target_network(t)

            # Update the meta-controller if needed
            # Note that this condition always holds if done is True
            if env.did_option_terminate(option_id):
                option_sn = new_obs
                option_reward = sum(
                    [_r * gamma**_i for _i, _r in enumerate(option_rews)])
                valid_options = [] if done else env.get_valid_options()
                controller.add_experience(option_s, option_id, option_reward,
                                          option_sn, done, valid_options,
                                          gamma**(len(option_rews)))
                controller.learn()
                controller.update_target_network()
                controller.increase_step()
                option_id = None

            obs = new_obs
            episode_rewards[-1] += rew

            if done:
                obs = env.reset()
                options.reset()
                episode_rewards.append(0.0)
                reset = True

            # General stats
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.dump_tabular()

            if (checkpoint_freq is not None and num_episodes > 100
                    and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            #load_variables(model_file)

    return controller.act, options.act
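The controller update above treats each finished option as a single SMDP step: rewards collected inside the option are discounted from the option's start, and the controller's next decision is further discounted by gamma raised to the option's length. A small standalone sketch of that computation (the function name is illustrative):

import numpy as np

def option_return(option_rews, gamma):
    """Return the option's discounted reward sum and the discount applied to the
    controller's next decision, matching the update performed above."""
    rews = np.asarray(option_rews, dtype=np.float64)
    discounts = gamma ** np.arange(len(rews))
    return float(np.sum(discounts * rews)), float(gamma ** len(rews))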
Ejemplo n.º 33
0
def learn(env,
          network,
          seed=None,
          use_crm=False,
          use_rs=False,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    use_crm: bool
        use counterfactual experience to train the policy
    use_rs: bool
        use reward shaping
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """

    # Adjusting hyper-parameters by considering the number of RM states for crm
    if use_crm:
        rm_states = env.get_num_rm_states()
        buffer_size = rm_states * buffer_size
        batch_size = rm_states * batch_size

    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps,
                         **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, info = env.step(env_action)

            # Store transition in the replay buffer.
            if use_crm:
                # Adding counterfactual experience (this will already include shaped rewards if use_rs=True)
                experiences = info["crm-experience"]
            elif use_rs:
                # Include only the current experience but shape the reward
                experiences = [info["rs-experience"]]
            else:
                # Include only the current experience (standard deepq)
                experiences = [(obs, action, rew, new_obs, float(done))]
            # Adding the experiences to the replay buffer
            for _obs, _action, _r, _new_obs, _done in experiences:
                replay_buffer.add(_obs, _action, _r, _new_obs, _done)

            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
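A hypothetical invocation of this CRM variant is sketched below; env is assumed to be a reward-machine environment created elsewhere that exposes get_num_rm_states() and places the counterfactual tuples under info["crm-experience"], exactly as the loop above expects:

# env: a reward-machine environment created elsewhere (not shown)
act = learn(env,
            network='mlp',
            use_crm=True,
            lr=5e-4,
            gamma=0.9,
            total_timesteps=100000,
            buffer_size=50000,
            batch_size=32)
act.save('crm_policy.pkl')  # assumes the baselines ActWrapper save() helper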
Ejemplo n.º 34
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    
    ############################## RL-S Prepare #############################################
    
    # model saved name
    saved_name = "0817"

    #####
    # Setup Training Record
    #####
    save_new_data = False
    create_new_file = False
    create_new_file_rule = create_new_file
    save_new_data_rule = save_new_data

    create_new_file_RL = False
    save_new_data_RL = save_new_data
    
    create_new_file_replay_buffer = False
    save_new_data_replay_buffer = save_new_data

    is_training = False
    trajectory_buffer = deque(maxlen=20)

    if create_new_file_replay_buffer:
        if osp.exists("recorded_replay_buffer.txt"):
            os.remove("recorded_replay_buffer.txt")
    else:
        replay_buffer_dataset = np.loadtxt("recorded_replay_buffer.txt")
        for data in replay_buffer_dataset:
            obs, action, rew, new_obs, done = _extract_data(data)
            replay_buffer.add(obs, action, rew, new_obs, done)

    recorded_replay_buffer_outfile = open("recorded_replay_buffer.txt","a")
    recorded_replay_buffer_format = " ".join(("%f",)*31)+"\n"
    
    #####
    # Setup Rule-based Record
    #####
    create_new_file_rule = True

    # create state database
    if create_new_file_rule:
        if osp.exists("state_index_rule.dat"):
            os.remove("state_index_rule.dat")
            os.remove("state_index_rule.idx")
        if osp.exists("visited_state_rule.txt"):
            os.remove("visited_state_rule.txt")
        if osp.exists("visited_value_rule.txt"):
            os.remove("visited_value_rule.txt")

        visited_state_rule_value = []
        visited_state_rule_counter = 0
    else:
        visited_state_rule_value = np.loadtxt("visited_value_rule.txt")
        visited_state_rule_value = visited_state_rule_value.tolist()
        visited_state_rule_counter = len(visited_state_rule_value)

    visited_state_rule_outfile = open("visited_state_rule.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_rule_outfile = open("visited_value_rule.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_rule_tree = rindex.Index('state_index_rule',properties=visited_state_tree_prop)

    #####
    # Setup RL-based Record
    #####

    if create_new_file_RL:
        if osp.exists("state_index_RL.dat"):
            os.remove("state_index_RL.dat")
            os.remove("state_index_RL.idx")
        if osp.exists("visited_state_RL.txt"):
            os.remove("visited_state_RL.txt")
        if osp.exists("visited_value_RL.txt"):
            os.remove("visited_value_RL.txt")

    if create_new_file_RL:
        visited_state_RL_value = []
        visited_state_RL_counter = 0
    else:
        visited_state_RL_value = np.loadtxt("visited_value_RL.txt")
        visited_state_RL_value = visited_state_RL_value.tolist()
        visited_state_RL_counter = len(visited_state_RL_value)

    visited_state_RL_outfile = open("visited_state_RL.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_RL_outfile = open("visited_value_RL.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_RL_tree = rindex.Index('state_index_RL',properties=visited_state_tree_prop)


    ############################## RL-S Prepare End #############################################
    
    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action, q_function_cz = act(np.array(obs)[None], update_eps=update_eps, **kwargs)
            
            # RLS_action = generate_RLS_action(obs,q_function_cz,action,visited_state_rule_value,
            #                                 visited_state_rule_tree,visited_state_RL_value,
            #                                 visited_state_RL_tree,is_training)

            RLS_action = 0
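            # NOTE: hard-coded to the rule-based action (index 0) while the generate_RLS_action selector above is commented out.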

            env_action = RLS_action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)

            ########### Record data in trajectory buffer and local file, but not in replay buffer ###########

            trajectory_buffer.append((obs, action, float(rew), new_obs, float(done)))

            # Store transition in the replay buffer.
            # replay_buffer.add(obs, action, rew, new_obs, float(done))

            obs = new_obs
            episode_rewards[-1] += rew # safe driving is 1, collision is 0


            while len(trajectory_buffer)>10:
                # if the agent drives safely for 10 steps (tunable), the state is regarded as safe
                obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                # save this state in local replay buffer file
                if save_new_data_replay_buffer:
                    recorded_data = _wrap_data(obs_left, action_left, rew_left, new_obs_left, done_left)
                    recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                # put this state in replay buffer
                replay_buffer.add(obs_left[0], action_left, float(rew_left), new_obs_left[0], float(done_left))
                action_to_record = action_left
                r_to_record = rew_left
                obs_to_record = obs_left

                # save this state in rule-based or RL-based visited state
                if action_left == 0:
                    if save_new_data_rule:
                        visited_state_rule_value.append([action_to_record,r_to_record])
                        visited_state_rule_tree.insert(visited_state_rule_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_rule_counter += 1
                else:
                    if save_new_data_RL:
                        visited_state_RL_value.append([action_to_record,r_to_record])
                        visited_state_RL_tree.insert(visited_state_RL_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_RL_counter += 1

            ################# Record data end ########################
            
            
            if done:
                """ 
                Get collision or out of multilane map
                """
                ####### Record the trajectory data and add data in replay buffer #########
                _, _, rew_right, _, _ = trajectory_buffer[-1]

                while len(trajectory_buffer)>0:
                    obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                    action_to_record = action_left
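                    # Back up the final step's reward: blend (rew_right - rew_left) into this step,
                    # discounted by the number of remaining steps to the episode end.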
                    r_to_record = (rew_right-rew_left)*gamma**len(trajectory_buffer) + rew_left
                    # record in local replay buffer file
                    if save_new_data_replay_buffer:
                        obs_to_record = obs_left
                        recorded_data = _wrap_data(obs_left, action_left, r_to_record, new_obs_left, done_left)
                        recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                    # record in replay buffer for training
                    replay_buffer.add(obs_left[0], action_left, float(r_to_record), new_obs_left[0], float(done_left))

                    # save visited rule/RL state data in local file
                    if action_left == 0:
                        if save_new_data_rule:
                            visited_state_rule_value.append([action_to_record,r_to_record])
                            visited_state_rule_tree.insert(visited_state_rule_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_rule_counter += 1
                    else:
                        if save_new_data_RL:
                            visited_state_RL_value.append([action_to_record,r_to_record])
                            visited_state_RL_tree.insert(visited_state_RL_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_RL_counter += 1

                ####### Recorded #####

                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            ############### Training Part Start #####################
            if not is_training:
                # don't need to train the model
                continue

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

                    rew_str = str(mean_100ep_reward)
                    path = osp.expanduser("~/models/carlaok_checkpoint/"+saved_name+"_"+rew_str)
                    act.save(path)

        #### close the file ####
        visited_state_rule_outfile.close()
        visited_value_rule_outfile.close()
        recorded_replay_buffer_outfile.close()
        if not is_training:
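            # NOTE: testing_record_outfile is not opened anywhere in this snippet; it is presumably created elsewhere when is_training is False.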
            testing_record_outfile.close()
        #### close the file ###

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
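The visited-state bookkeeping above stores each 14-dimensional observation in an rtree index as a small box of half-width visited_state_dist around the state. A sketch of how such a tree might be queried for previously visited states near a new observation, assuming the same interleaved min/max coordinate layout used by the insert calls above (the helper name is illustrative):

def count_nearby_visited(tree, obs):
    """Count stored state boxes that contain obs; obs is shaped (1, 14) as above."""
    point = tuple(obs.tolist()[0] + obs.tolist()[0])  # a degenerate box at obs
    return len(list(tree.intersection(point)))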