Example #1
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. Runs with the same seed should give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps,
                         **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act, debug['q_func'], debug['obs']
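
# --- Illustrative usage sketch (not part of the original example) ---
# Assumes the baselines imports used above plus gym; the environment name and
# hyperparameters below are placeholder assumptions, not values from the source.
if __name__ == '__main__':
    import gym
    env = gym.make('CartPole-v0')
    act, q_func_dbg, obs_dbg = learn(env, network='mlp', lr=1e-3, gamma=0.99,
                                     total_timesteps=100000, print_freq=10)
    # ActWrapper exposes save/save_act for persisting the policy
    # (the exact API may vary by baselines version).
    act.save('cartpole_dqn.pkl')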
Example #2
def train_policy(arglist):
    with U.single_threaded_session():
        # Create the environment
        if arglist.use_dense_rewards:
            print("Will use env MineRLNavigateDense-v0")
            env = gym.make("MineRLNavigateDense-v0")
            env_name = "MineRLNavigateDense-v0"
        else:
            print("Will use env MineRLNavigate-v0")
            env = gym.make('MineRLNavigate-v0')
            env_name = "MineRLNavigate-v0"

        if arglist.force_forward:
            env = MineCraftWrapperSimplified(env)
        else:
            env = MineCraftWrapper(env)

        if not arglist.use_demonstrations:
            # Use stack of last 4 frames as obs
            env = FrameStack(env, 4)

        # Create all the functions necessary to train the model
        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=lambda name: ObservationInput(env.observation_space,
                                                      name=name),
            q_func=build_q_func('conv_only', dueling=True),
            num_actions=env.action_space.n,
            gamma=0.9,
            optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
        )

        # Create the replay buffer(s) (TODO: Use prioritized replay buffer)
        if arglist.use_demonstrations:
            replay_buffer = ReplayBuffer(int(arglist.replay_buffer_len / 2))
            demo_buffer = load_demo_buffer(env_name,
                                           int(arglist.replay_buffer_len / 2))
        else:
            replay_buffer = ReplayBuffer(arglist.replay_buffer_len)

        # Create the schedule for exploration, annealed from 1 (every action is
        # random) down to arglist.final_epsilon (e.g. 0.02 means 98% of actions
        # are selected according to the values predicted by the model).
        exploration = LinearSchedule(
            schedule_timesteps=arglist.num_exploration_steps *
            arglist.num_episodes * arglist.max_episode_steps,
            initial_p=1.0,
            final_p=arglist.final_epsilon)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        episode_rewards = [0.0]
        n_episodes = 0
        n_steps = 0
        obs = env.reset()
        log_path = "./learning_curves/minerl_" + str(date.today()) + "_" + str(
            time.time()) + ".dat"
        log_file = open(log_path, "a")
        for episode in range(arglist.num_episodes):
            print("Episode: ", str(episode))
            done = False
            episode_steps = 0
            while not done:

                # Take action and update exploration to the newest value
                action = act(obs[None],
                             update_eps=exploration.value(n_steps))[0]
                new_obs, rew, done, _ = env.step(action)
                n_steps += 1
                episode_steps += 1

                # Break episode
                if episode_steps > arglist.max_episode_steps:
                    done = True

                # Store transition in the replay buffer.
                replay_buffer.add(obs, action, rew, new_obs, float(done))
                obs = new_obs

                # Store rewards
                episode_rewards[-1] += rew
                if done:
                    obs = env.reset()
                    episode_rewards.append(0)
                    n_episodes += 1

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if (n_steps > arglist.learning_starts_at_steps) and (n_steps %
                                                                     4 == 0):
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        32)
                    train(obses_t, actions, rewards, obses_tp1, dones,
                          np.ones_like(rewards))

                if arglist.use_demonstrations:
                    if (n_steps < arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))
                    if (n_steps > arglist.learning_starts_at_steps) and (
                            n_steps % 4 == 0):
                        obses_t, actions, rewards, obses_tp1, dones = demo_buffer.sample(
                            32)
                        train(obses_t, actions, rewards, obses_tp1, dones,
                              np.ones_like(rewards))

                # Update target network periodically.
                if n_steps % arglist.target_net_update_freq == 0:
                    update_target()

                # Log data for analysis
                if done and len(episode_rewards) % 10 == 0:
                    logger.record_tabular("steps", n_steps)
                    logger.record_tabular("episodes", len(episode_rewards))
                    logger.record_tabular(
                        "mean episode reward",
                        round(np.mean(episode_rewards[-101:-1]), 1))
                    logger.record_tabular(
                        "% time spent exploring",
                        int(100 * exploration.value(n_steps)))
                    logger.dump_tabular()

                #TODO: Save checkpoints
                if n_steps % arglist.checkpoint_rate == 0:
                    checkpoint_path = "./checkpoints/minerl_" + str(
                        episode) + "_" + str(date.today()) + "_" + str(
                            time.time()) + ".pkl"
                    save_variables(checkpoint_path)
                    print("%s,%s,%s,%s" %
                          (n_steps, episode,
                           round(np.mean(episode_rewards[-101:-1]),
                                 1), int(100 * exploration.value(n_steps))),
                          file=log_file)
        log_file.close()
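
# --- Illustrative usage sketch (not part of the original example) ---
# train_policy reads the fields below from an argparse-style namespace; the
# values shown are placeholders, not the project's actual defaults. Note that
# the function writes to ./learning_curves/ and ./checkpoints/, which must exist.
if __name__ == '__main__':
    from argparse import Namespace
    arglist = Namespace(
        use_dense_rewards=True,
        force_forward=True,
        use_demonstrations=False,
        replay_buffer_len=50000,
        num_exploration_steps=1,   # multiplied by num_episodes * max_episode_steps above
        num_episodes=100,
        max_episode_steps=1000,
        final_epsilon=0.02,
        learning_starts_at_steps=1000,
        target_net_update_freq=500,
        checkpoint_rate=10000,
    )
    train_policy(arglist)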
Example #3
    def __init__(
            self,
            env,
            # observation_space,
            # action_space,
            network=None,
            scope='deepq',
            seed=None,
            lr=None,  # Was 5e-4
            lr_mc=5e-4,
            total_episodes=None,
            total_timesteps=100000,
            buffer_size=50000,
            exploration_fraction=0.1,
            exploration_final_eps=None,  # was 0.02
            train_freq=1,
            train_log_freq=100,
            batch_size=32,
            print_freq=100,
            checkpoint_freq=10000,
            # checkpoint_path=None,
            learning_starts=1000,
            gamma=None,
            target_network_update_freq=500,
            prioritized_replay=False,
            prioritized_replay_alpha=0.6,
            prioritized_replay_beta0=0.4,
            prioritized_replay_beta_iters=None,
            prioritized_replay_eps=1e-6,
            save_path=None,
            load_path=None,
            save_reward_threshold=None,
            **network_kwargs):
        super().__init__(env, seed)
        if train_log_freq % train_freq != 0:
            raise ValueError(
                'Train log frequency should be a multiple of train frequency')
        elif checkpoint_freq % train_log_freq != 0:
            raise ValueError(
                'Checkpoint freq should be a multiple of train log frequency, or model saving will not be logged properly'
            )
        print('init dqnlearningagent')
        self.train_log_freq = train_log_freq
        self.scope = scope
        self.learning_starts = learning_starts
        self.save_reward_threshold = save_reward_threshold
        self.batch_size = batch_size
        self.train_freq = train_freq
        self.total_episodes = total_episodes
        self.total_timesteps = total_timesteps
        # TODO: scope not doing anything.
        if network is None and 'lunar' in env.unwrapped.spec.id.lower():
            if lr is None:
                lr = 1e-3
            if exploration_final_eps is None:
                exploration_final_eps = 0.02
            #exploration_fraction = 0.1
            #exploration_final_eps = 0.02
            target_network_update_freq = 1500
            #print_freq = 100
            # num_cpu = 5
            if gamma is None:
                gamma = 0.99

            network = 'mlp'
            network_kwargs = {
                'num_layers': 2,
                'num_hidden': 64,
            }

        self.target_network_update_freq = target_network_update_freq
        self.gamma = gamma

        get_session()
        # set_global_seeds(seed)
        # TODO: Check whether the following is OK to substitute for set_global_seeds.
        try:
            import tensorflow as tf
            tf.set_random_seed(seed)
        except ImportError:
            pass

        self.q_func = build_q_func(network, **network_kwargs)

        # capture the shape outside the closure so that the env object is not serialized
        # by cloudpickle when serializing make_obs_ph

        def make_obs_ph(name):
            return ObservationInput(env.observation_space, name=name)

        act, self.train, self.train_mc, self.update_target, debug = deepq.build_train(
            make_obs_ph=make_obs_ph,
            q_func=self.q_func,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            optimizer_mc=tf.train.AdamOptimizer(learning_rate=lr_mc),
            gamma=gamma,
            grad_norm_clipping=10,
            param_noise=False,
            scope=scope,
            # reuse=reuse,
        )

        act_params = {
            'make_obs_ph': make_obs_ph,
            'q_func': self.q_func,
            'num_actions': env.action_space.n,
        }

        self._act = ActWrapper(act, act_params)

        self.print_freq = print_freq
        self.checkpoint_freq = checkpoint_freq
        # Create the replay buffer
        self.prioritized_replay = prioritized_replay
        self.prioritized_replay_eps = prioritized_replay_eps

        if self.prioritized_replay:
            self.replay_buffer = PrioritizedReplayBuffer(
                buffer_size,
                alpha=prioritized_replay_alpha,
            )
            if prioritized_replay_beta_iters is None:
                if total_episodes is not None:
                    raise NotImplementedError(
                        'Need to check how to set exploration based on episodes'
                    )
                prioritized_replay_beta_iters = total_timesteps
            self.beta_schedule = LinearSchedule(
                prioritized_replay_beta_iters,
                initial_p=prioritized_replay_beta0,
                final_p=1.0,
            )
        else:
            self.replay_buffer = ReplayBuffer(buffer_size)
            self.replay_buffer_mc = ReplayBuffer(buffer_size)
            self.beta_schedule = None
        # Create the schedule for exploration starting from 1.
        self.exploration = LinearSchedule(
            schedule_timesteps=int(
                exploration_fraction *
                total_timesteps if total_episodes is None else total_episodes),
            initial_p=1.0,
            final_p=exploration_final_eps,
        )

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        self.update_target()

        self.episode_lengths = [0]
        self.episode_rewards = [0.0]
        self.discounted_episode_rewards = [0.0]
        self.start_values = [None]
        self.lunar_crashes = [0]
        self.lunar_goals = [0]
        self.saved_mean_reward = None

        self.td = None
        if save_path is None:
            self.td = tempfile.mkdtemp()
            outdir = self.td
            self.model_file = os.path.join(outdir, "model")
        else:
            outdir = os.path.dirname(save_path)
            os.makedirs(outdir, exist_ok=True)
            self.model_file = save_path
        print('DQN agent saving to:', self.model_file)
        self.model_saved = False

        if tf.train.latest_checkpoint(outdir) is not None:
            # TODO: Check scope addition
            load_variables(self.model_file, scope=self.scope)
            # load_variables(self.model_file)
            logger.log('Loaded model from {}'.format(self.model_file))
            self.model_saved = True
            raise Exception('Check that we want to load previous model')
        elif load_path is not None:
            # TODO: Check scope addition
            load_variables(load_path, scope=self.scope)
            # load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        self.train_log_file = None
        if save_path and load_path is None:
            self.train_log_file = self.model_file + '.log.csv'
            with open(self.train_log_file, 'w') as f:
                cols = [
                    'episode',
                    't',
                    'td_max',
                    'td_mean',
                    '100ep_r_mean',
                    '100ep_r_mean_discounted',
                    '100ep_v_mean',
                    '100ep_n_crashes_mean',
                    '100ep_n_goals_mean',
                    'saved_model',
                    'smoothing',
                ]
                f.write(','.join(cols) + '\n')

        self.training_episode = 0
        self.t = 0
        self.episode_t = 0
        """
        n = observation_space.n
        m = action_space.n
        self.Q = np.zeros((n, m))

        self._lr_schedule = lr_schedule
        self._eps_schedule = eps_schedule
        self._boltzmann_schedule = boltzmann_schedule
        """

        # Make placeholder for Q values
        self.q_values = debug['q_values']
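
# --- Illustrative usage sketch (not part of the original example) ---
# The constructor above belongs to a DQN agent class whose name is not shown in
# this excerpt; `DQNLearningAgent` below is a hypothetical stand-in.
#
#   env = gym.make('LunarLander-v2')   # a 'lunar' env id triggers the defaults above
#   agent = DQNLearningAgent(env, seed=0, save_path='./models/lunar_dqn')
#   # With network=None on a LunarLander env the constructor selects a 2x64 MLP,
#   # lr=1e-3, gamma=0.99, final epsilon 0.02 and a target update frequency of
#   # 1500, as set in the branch above.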
Example #4
    def __init__(self,
                 env,
                 network='mlp',
                 lr=5e-4,
                 buffer_size=50000,
                 exploration_epsilon=0.1,
                 train_freq=1,
                 batch_size=32,
                 learning_starts=1000,
                 target_network_update_freq=500,
                 **network_kwargs):
        """DQN wrapper to train option policies

        Parameters
        -------
        env: gym.Env
            environment to train on
        network: string or a function
            neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
            (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
            will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
        lr: float
            learning rate for adam optimizer
        buffer_size: int
            size of the replay buffer
        exploration_epsilon: float
            value of random action probability
        train_freq: int
            update the model every `train_freq` steps.
        batch_size: int
            size of a batch sampled from replay buffer for training
        learning_starts: int
            how many steps of the model to collect transitions for before learning starts
        target_network_update_freq: int
            update the target network every `target_network_update_freq` steps.
        network_kwargs
            additional keyword arguments to pass to the network builder.
        """

        # Creating the network
        q_func = build_q_func(network, **network_kwargs)

        # capture the shape outside the closure so that the env object is not serialized
        # by cloudpickle when serializing make_obs_ph

        observation_space = env.controller_observation_space

        def make_obs_ph(name):
            return ObservationInput(observation_space, name=name)

        act, train, update_target, debug = build_train(
            make_obs_ph=make_obs_ph,
            q_func=q_func,
            num_actions=env.controller_action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            grad_norm_clipping=10,
            scope="controller")

        act_params = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func,
            'num_actions': env.controller_action_space.n,
        }

        act = ActWrapper(act, act_params)

        # Create the replay buffer
        replay_buffer = ReplayBuffer(buffer_size)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        # Variables that are used during learning
        self.act = act
        self.train = train
        self.update_target = update_target
        self.replay_buffer = replay_buffer
        self.exp_epsilon = exploration_epsilon
        self.train_freq = train_freq
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.target_network_update_freq = target_network_update_freq
        self.num_actions = env.controller_action_space.n
        self.t = 0
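
# --- Illustrative usage sketch (not part of the original example) ---
# This constructor is part of a controller-level DQN wrapper class (name not
# shown in this excerpt); the env is assumed to expose
# controller_observation_space and controller_action_space as used above.
#
#   controller = ControllerDQN(env, network='mlp', lr=5e-4)   # hypothetical class name
#   # controller.act / controller.train / controller.update_target are then
#   # driven by an outer training loop that is not part of this excerpt.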
Example #5
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=3000,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=3000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.
    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. Runs with the same seed should give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.
    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=0.99,  # note: hardcoded; the gamma argument of this function is not used here
        double_q=False,
        # grad_norm_clipping=10,
        # param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
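    # Note: unlike the docstring above, this variant hardcodes the schedule
    # (10000 timesteps, final epsilon 0.02) and does not use the
    # exploration_fraction / exploration_final_eps arguments.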
    exploration = LinearSchedule(schedule_timesteps=int(10000),
                                 initial_p=1.0,
                                 final_p=0.02)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()


    old_state = None

    formula_LTLf_1 = "!d U(g)"
    monitoring_RightToLeft = MonitoringSpecification(
        ltlf_formula=formula_LTLf_1,
        r=0,
        c=-0.01,
        s=10,
        f=-10
    )

    formula_LTLf_2 = "F(G(bb)) "  # break brick
    monitoring_BreakBrick = MonitoringSpecification(
        ltlf_formula=formula_LTLf_2,
        r=10,
        c=-0.01,
        s=10,
        f=0
    )

    monitoring_specifications = [monitoring_BreakBrick, monitoring_RightToLeft]

    def RightToLeftConversion(observation) -> TraceStep:
        # Map a raw observation to the fluents used by the LTLf monitors:
        # 'g' (goal reached), 'd' (dead), 'o' (bricks in the right order),
        # 'bb' (a brick was broken). old_state (the previous observation) is
        # read from the enclosing learn() scope.
        done = False

        if arrays_equal(observation[-9:], np.zeros(len(observation[-9:]))):
            # All bricks are broken
            goal = True
            done = True
        else:
            goal = False

        dead = False
        if done and not goal:
            dead = True

        order = check_ordered(observation[-9:])
        if not order:
            dead = True
            done = True

        if old_state is not None:  # not the first state of the episode
            brick_broken = not arrays_equal(old_state[-9:], observation[-9:])
        else:
            brick_broken = False

        return {'g': goal, 'd': dead, 'o': order, 'bb': brick_broken}

    multi_monitor = MultiRewardMonitor(
        monitoring_specifications=monitoring_specifications,
        obs_to_trace_step=RightToLeftConversion
    )


    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    done = False

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episodeCounter = 0
        num_episodes = 0
        for t in itertools.count():

            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            new_obs, rew, done, _ = env.step(action)

            # The env's reward and done flag are replaced below: the reward
            # comes from the LTLf reward monitor and termination from its
            # permanent-state signal (is_perm).
            done = False

            start_time = time.time()
            rew, is_perm = multi_monitor(new_obs)
            # print("--- %s seconds ---" % (time.time() - start_time))
            old_state = new_obs

            done = done or is_perm

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew

            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if episodeCounter % 100 == 0 or episodeCounter < 1:
                # Show off the result
                env.render()

            if done:
                episodeCounter += 1
                num_episodes += 1
                obs = env.reset()
                old_state = None
                episode_rewards.append(0)
                multi_monitor.reset()
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
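                # Note: the learning-start threshold (t > 1000), batch size (64)
                # and target-network update period (1000 steps) are hardcoded
                # here rather than taken from the corresponding function
                # arguments.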
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(64)
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))

                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("currentEpisodeReward", episode_rewards[-1])
                logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    act.save_act()
                    #save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        # if model_saved:
        #     if print_freq is not None:
        #         logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
        #     load_variables(model_file)

    return act
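
# --- Illustrative usage sketch (not part of the original example) ---
# This variant expects an observation whose last 9 entries encode the bricks of
# a Breakout-style environment, plus the temporal-logic utilities imported above
# (MonitoringSpecification, MultiRewardMonitor, arrays_equal, check_ordered).
#
#   env = gym.make(...)   # custom Breakout wrapper, not shown in this excerpt
#   act = learn(env, network='mlp', total_timesteps=100000)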
Example #6
    def __init__(self,
                 env,
                 gamma,
                 total_timesteps,
                 network='mlp',
                 lr=5e-4,
                 buffer_size=50000,
                 exploration_fraction=0.1,
                 exploration_final_eps=0.02,
                 train_freq=1,
                 batch_size=32,
                 learning_starts=1000,
                 target_network_update_freq=500,
                 prioritized_replay=False,
                 prioritized_replay_alpha=0.6,
                 prioritized_replay_beta0=0.4,
                 prioritized_replay_beta_iters=None,
                 prioritized_replay_eps=1e-6,
                 param_noise=False,
                 **network_kwargs):
        """DQN wrapper to train option policies

        Parameters
        -------
        env: gym.Env
            environment to train on
        gamma: float
            discount factor
        network: string or a function
            neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
            (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
            will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
        total_timesteps: int
            number of env steps to optimize for
        lr: float
            learning rate for adam optimizer
        buffer_size: int
            size of the replay buffer
        exploration_fraction: float
            fraction of entire training period over which the exploration rate is annealed
        exploration_final_eps: float
            final value of random action probability
        train_freq: int
            update the model every `train_freq` steps.
        batch_size: int
            size of a batch sampled from replay buffer for training
        learning_starts: int
            how many steps of the model to collect transitions for before learning starts
        target_network_update_freq: int
            update the target network every `target_network_update_freq` steps.
        prioritized_replay: bool
            if True prioritized replay buffer will be used.
        prioritized_replay_alpha: float
            alpha parameter for prioritized replay buffer
        prioritized_replay_beta0: float
            initial value of beta for prioritized replay buffer
        prioritized_replay_beta_iters: int
            number of iterations over which beta will be annealed from initial value
            to 1.0. If set to None, it defaults to total_timesteps.
        prioritized_replay_eps: float
            epsilon to add to the TD errors when updating priorities.
        param_noise: bool
            whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
        **network_kwargs
            additional keyword arguments to pass to the network builder.
        """

        # Adjusting hyper-parameters by considering the number of options policies to learn
        num_options = env.get_number_of_options()
        buffer_size = num_options * buffer_size
        batch_size = num_options * batch_size

        q_func = build_q_func(network, **network_kwargs)

        # capture the shape outside the closure so that the env object is not serialized
        # by cloudpickle when serializing make_obs_ph

        observation_space = env.option_observation_space

        def make_obs_ph(name):
            return ObservationInput(observation_space, name=name)

        self.num_actions = env.option_action_space.n

        act, train, update_target, debug = deepq.build_train(
            make_obs_ph=make_obs_ph,
            q_func=q_func,
            num_actions=self.num_actions,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            gamma=gamma,
            grad_norm_clipping=10,
            param_noise=param_noise,
            scope="options")

        act_params = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func,
            'num_actions': self.num_actions,
        }

        act = ActWrapper(act, act_params)

        # Create the replay buffer
        if prioritized_replay:
            replay_buffer = PrioritizedReplayBuffer(
                buffer_size, alpha=prioritized_replay_alpha)
            if prioritized_replay_beta_iters is None:
                prioritized_replay_beta_iters = total_timesteps
            beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                           initial_p=prioritized_replay_beta0,
                                           final_p=1.0)
        else:
            replay_buffer = ReplayBuffer(buffer_size)
            beta_schedule = None
        # Create the schedule for exploration starting from 1.
        exploration = LinearSchedule(schedule_timesteps=int(
            exploration_fraction * total_timesteps),
                                     initial_p=1.0,
                                     final_p=exploration_final_eps)

        # Initialize the parameters and copy them to the target network.
        U.initialize()
        update_target()

        # Variables that are used during learning
        self.act = act
        self.train = train
        self.update_target = update_target
        self.replay_buffer = replay_buffer
        self.beta_schedule = beta_schedule
        self.exploration = exploration
        self.param_noise = param_noise
        self.train_freq = train_freq
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.target_network_update_freq = target_network_update_freq

        self.prioritized_replay = prioritized_replay
        self.prioritized_replay_alpha = prioritized_replay_alpha
        self.prioritized_replay_beta0 = prioritized_replay_beta0
        self.prioritized_replay_beta_iters = prioritized_replay_beta_iters
        self.prioritized_replay_eps = prioritized_replay_eps
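
# --- Illustrative usage sketch (not part of the original example) ---
# This constructor is part of an option-policy DQN wrapper class (name not
# shown); the env is assumed to expose get_number_of_options(),
# option_observation_space and option_action_space, as used above.
#
#   options_dqn = OptionPoliciesDQN(env, gamma=0.99, total_timesteps=100000)  # hypothetical class name
#   # The stored self.act / self.train / self.update_target / self.replay_buffer
#   # are then used by an outer option-training loop not included here.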
Example #7
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=5,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. Runs with the same seed should give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the trained model from (default: None). Used in the test stage.
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.

    """

    # Create all the functions necessary to train the model
    sess = get_session()
    set_global_seeds(seed)
    med_libs = MedLibs()
    '''Define Q network 
    inputs: observation place holder(make_obs_ph), num_actions, scope, reuse
    outputs(tensor of shape batch_size*num_actions): values of each action, Q(s,a_{i})
    '''
    q_func = build_q_func(network, **network_kwargs)
    '''  To put observations into a placeholder  '''
    # TODO: Can only deal with Discrete and Box observation spaces for now
    # observation_space = env.observation_space (default)
    # Use sub_obs_space instead

    observation_space = med_libs.subobs_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    '''  Customize action  '''
    # TODO: subset of action space.
    action_dim = med_libs.sub_act_dim
    ''' 
    Returns: deepq.build_train()
        act: (tf.Variable, bool, float) -> tf.Variable
            function to select and action given observation.
            act is computed by [build_act] or [build_act_with_param_noise]
        train: (object, np.array, np.array, object, np.array, np.array) -> np.array
            optimize the error in Bellman's equation.
        update_target: () -> ()
            copy the parameters from optimized Q function to the target Q function. 
        debug: {str: function}
            a bunch of functions to print debug data like q_values.
    '''

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=action_dim,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        double_q=True,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': action_dim,
    }
    '''Contruct an act object using ActWrapper'''
    act = ActWrapper(act, act_params)
    ''' Create the replay buffer'''
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    '''Create the schedule for exploration starting from 1.'''
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
    '''
    Initialize all the uninitialized variables in the global scope and copy them to the target network.
    '''
    U.initialize()
    update_target()
    episode_rewards = [0.0]
    saved_mean_reward = None

    obs = env.reset()
    sub_obs = med_libs.custom_obs(obs)  # TODO: customize observations
    pre_obs = obs
    reset = True
    mydict = med_libs.action_dict
    already_starts = False

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True

        elif load_path is not None:
            # load_path: a trained model/policy
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))
        ''' Training loop starts'''
        t = 0
        while t < total_timesteps:
            if callback is not None:
                if callback(locals(), globals()):
                    break
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            ''' Choose action: take action and update exploration to the newest value
            '''
            # TODO: Mixed action strategy
            # Normal status, action is easily determined by rules, use [obs]
            action = med_libs.simple_case_action(obs)
            # Distraction status, action is determined by Q, with [sub_obs]
            if action == -10:
                action = act(np.array(sub_obs)[None],
                             update_eps=update_eps,
                             **kwargs)[0]
                action = med_libs.action_Q_env(
                    action
                )  # TODO:action_Q_env, from Q_action(0~2) to env_action(2~4)

            reset = False
            ''' Step action '''
            new_obs, rew, done, d_info = env.step(action)
            d_att_last = int(pre_obs[0][0])
            d_att_now = int(obs[0][0])
            d_att_next = int(new_obs[0][0])
            ''' Store transition in the replay buffer.'''
            pre_obs = obs
            obs = new_obs
            sub_new_obs = med_libs.custom_obs(new_obs)

            if (d_att_last == 0 and d_att_now == 1) and not already_starts:
                already_starts = True

            if already_starts and d_att_now == 1:
                replay_buffer.add(sub_obs, action, rew, sub_new_obs,
                                  float(done))
                episode_rewards[-1] += rew  # Sum of rewards
                t = t + 1
                print(
                    '>> Iteration:{}, State[d_att,cd_activate,L4_available,ssl4_activate,f_dc]:{}'
                    .format(t, sub_obs))
                print(
                    'Dis_Last:{}, Dis_Now:{}, Dis_Next:{},Reward+Cost:{}, Action:{}'
                    .format(
                        d_att_last, d_att_now, d_att_next, rew,
                        list(mydict.keys())[list(
                            mydict.values()).index(action)]))

            # update sub_obs
            sub_obs = sub_new_obs

            # Done and Reset
            if done:
                print('Done infos: ', d_info)
                print('======= end =======')
                obs = env.reset()
                sub_obs = med_libs.custom_obs(obs)  # TODO: custom obs
                pre_obs = obs  # TODO: save obs at t-1
                already_starts = False
                episode_rewards.append(0.0)
                reset = True

            # Update the Q network parameters
            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None

                # Calculate td-errors
                actions = med_libs.action_env_Q(
                    actions
                )  # TODO:action_env_Q, from env_action(2~4) to Q_action(0~2)
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically, copy weights of Q to target Q
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
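The example above defers to a project-specific `med_libs` module: `simple_case_action` returns a concrete environment action when one of its rules applies and the sentinel value -10 otherwise, and the two conversion helpers translate between the Q-network's action indices (0~2) and the environment's action ids (2~4), as the inline TODO comments state. A minimal sketch of those conversion helpers under that reading (the names and the fixed offset come only from the comments above, not from the real med_libs implementation):

import numpy as np

RULE_UNDECIDED = -10  # sentinel returned by simple_case_action when no rule fires

def action_Q_env(q_action, offset=2):
    """Map a Q-network action index (0~2) to an environment action id (2~4)."""
    return int(q_action) + offset

def action_env_Q(env_actions, offset=2):
    """Inverse mapping, applied to a batch of replayed actions before training."""
    return np.asarray(env_actions, dtype=np.int64) - offset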
Example No. 8
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
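A minimal sketch of how this learn() function could be invoked; the environment, hyperparameters, and save path below are illustrative assumptions, not values taken from the original source:

import gym

if __name__ == '__main__':
    env = gym.make('CartPole-v0')
    act = learn(
        env,
        network='mlp',          # one of the registered baselines models
        lr=1e-3,
        total_timesteps=100000,
        print_freq=10,
    )
    act.save('cartpole_model.pkl')  # ActWrapper can persist the trained variables
    env.close()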
Example No. 9
0
import gym
import pandas as pd
import tensorflow as tf
from baselines import deepq
from baselines.common import tf_util as U
from baselines.deepq.replay_buffer import ReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.deepq.models import build_q_func

t_train_time = 2e5
t_test_time = 10000

env = gym.make('CartPole-v0')
action_shape = (1, )
nb_action = 1
observation_shape = (3, )
dataPrimary = pd.read_csv("data_c/Cartpole-v0.csv", header=1)
load_path = 'ddpg_model'
load_path = None

q_func = build_q_func('mlp')
act, train, update_target, debug = deepq.build_train(
    make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
    q_func=q_func,
    num_actions=nb_action,
    optimizer=tf.train.AdamOptimizer(learning_rate=5e-4),
)
replay_buffer = ReplayBuffer(50000)
U.initialize()
update_target()
episode_rewards = [0.0]

if load_path is None:
    for index, row in dataPrimary.iterrows():
        if index > 2:
Example No. 10
0
def main():
    # configure logger, disable logging in child MPI processes (with rank > 0)
    arg_parser = common_arg_parser()
    args, unknown_args = arg_parser.parse_known_args()
    extra_args = parse_cmdline_kwargs(unknown_args)

    if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
        rank = 0
        logger.configure()
    else:
        logger.configure(format_strs=[])
        rank = MPI.COMM_WORLD.Get_rank()

    model, env, debug = train(args, extra_args) # Get the trained model
    env.close()

    if args.save_path is not None and rank == 0:
        save_path = osp.expanduser(args.save_path)
        model.save(save_path)

    if args.adv_alg: # If attack is applied, build the function for crafting adversarial observations
        g = tf.Graph()
        with g.as_default():
            with tf.Session() as sess:
                q_func = build_q_func(network='conv_only')
                craft_adv_obs = build_adv(
                    make_obs_tf=lambda name: ObservationInput(env.observation_space, name=name),
                    q_func=q_func, num_actions=env.action_space.n, epsilon=args.epsilon,
                    attack=args.adv_alg
                )

    if args.save_info: # Save all the information in a csv file
        name = args.info_name
        csv_file = open('/Users/harry/Documents/info/' + name, mode='a')
        fieldnames = ['episode', 'diff_type', 'diff', 'epsilon', 'steps', 'attack rate', 'success rate', 'score']
        writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
        writer.writeheader()

    if args.play:
        logger.log("Running trained model")
        env = build_env(args)
        obs = env.reset()
        action_meanings = env.unwrapped.get_action_meanings()
        def initialize_placeholders(nlstm=128,**kwargs):
            return np.zeros((args.num_env or 1, 2*nlstm)), np.zeros((1))
        state, dones = initialize_placeholders(**extra_args)

        num_episodes = 0
        num_moves = 0
        num_success_attack = 0
        num_attack = 0
        step = 0
        q_value_dict = {}
        old_diff = 0

        diff_type = args.diff_type
        print("Type of diff: {}. Threshold to launch attack: {}".format(diff_type, args.diff))
        print('-------------------------Episode 0 -------------------------')
        while True:
            step = step + 1 # Overall steps. Does not reset to 0 when an episode ends
            num_moves = num_moves + 1
            q_values = debug['q_values']([obs])
            q_values = np.squeeze(q_values)

            minus_diff = np.max(q_values) - np.min(q_values)
            div_diff = np.max(q_values) / np.min(q_values)
            sec_ord_diff = minus_diff - old_diff
            old_diff = minus_diff

            if args.save_q_value: # Save the q value to a file
                with open('/Users/harry/Documents/q_value_pong_ep' + str(num_episodes+1) + '_diff' + str(args.diff) + '.csv', 'a') as q_value_file:
                    q_value_writer = csv.writer(q_value_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    q_value_writer.writerow(q_values)

            if args.adv_alg:
                if args.diff_type == 'div_diff':
                    diff = div_diff
                elif args.diff_type == 'sec_ord_diff':
                    diff = sec_ord_diff
                else:
                    # 'diff' (or any unrecognized value) falls back to max(Q) - min(Q)
                    diff = minus_diff

                if diff >= args.diff:
                    num_attack = num_attack + 1
                    with g.as_default():
                        with tf.Session() as sess:
                            sess.run(tf.global_variables_initializer())
                            adv_obs = craft_adv_obs([obs])[0] # Get the adversarial observation
                            adv_obs = np.rint(adv_obs)
                            adv_obs = adv_obs.astype(np.uint8)

                    if args.preview_image: # Show a few adversarial images on the screen
                        if num_attack >= 2 and num_attack <= 10:
                            adv_img = Image.fromarray(np.asarray(adv_obs[:,:,0]), mode='L')
                            adv_img.show()

                    if args.save_image: # Save one episode of adversarial images in a folder
                        if num_episodes == 0:
                            img = Image.fromarray(np.asarray(adv_obs[:,:,0]), mode='L')
                            img.save('/Users/harry/Documents/adv_19_99/adv_image_' + str(num_moves) + '.png')

                    prev_state = np.copy(state)
                    action, _, _, _ = model.step(obs,S=prev_state, M=dones)
                    adv_action, _, state, _ = model.step(adv_obs,S=prev_state, M=dones)
                    if (adv_action != action): # Count as a successful attack
                        # print('Action before: {}, Action after: {}'.format(
                        #       action_meanings[action[0]], action_meanings[adv_action[0]]))
                        num_success_attack = num_success_attack + 1
                    obs, rew, done, info = env.step(adv_action)
                else:
                    action, _, state, _ = model.step(obs,S=state, M=dones)
                    obs, rew, done, info = env.step(action)
                    if args.save_image:
                        img = Image.fromarray(np.asarray(obs[:,:,0]), mode='L')
                        img.save('/Users/harry/Documents/adv_images_ep' + str(num_episodes+1) + '/' + str(num_moves) + '.png')
            else:
                if args.save_image: # Save one episode of normal images in a folder
                    if num_episodes == 0:
                        img = Image.fromarray(np.asarray(obs[:,:,0]), mode='L')
                        img.save('/Users/harry/Documents/normal_obs' + str(num_moves) + '.png')
                action, _, state, _ = model.step(obs,S=state, M=dones)
                obs, _, done, info = env.step(action)
            env.render()
            done = done.any() if isinstance(done, np.ndarray) else done

            if done:
                npc_score = info['episode']['r']
                score = 21 if npc_score < 0 else 21 - npc_score
                obs = env.reset()
                print('Episode {} takes {} time steps'.format(num_episodes, num_moves))
                print('NPC Score: {}'.format(npc_score))
                if args.adv_alg:
                    attack_rate = float(num_attack) / num_moves
                    success_rate = float(num_success_attack) / num_attack if num_attack > 0 else 0.0
                    print('Percentage of attack: {}'.format(100 * attack_rate))
                    print('Percentage of successful attacks: {}'.format(100 * success_rate))
                    info_dict = {'episode': num_episodes+1,'diff_type': args.diff_type, 'diff': args.diff, 'epsilon': args.epsilon,
                             'steps': num_moves, 'attack rate': attack_rate, 'success rate': success_rate, 'score': score}
                    writer.writerow(info_dict)

                num_moves = 0
                num_transfer = 0
                num_episodes = num_episodes + 1
                num_attack = 0
                num_success_attack = 0
                print(f'-------------------------Episode {num_episodes}-------------------------')

        env.close()
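build_adv above is project-specific and its implementation is not part of this excerpt. For orientation only, here is a rough FGSM-style sketch of how such an adversarial-observation builder could be assembled on top of the baselines Q-function; the scope name, clipping range, and loss choice are assumptions, not the original code:

import tensorflow as tf
import baselines.common.tf_util as U

def build_adv(make_obs_tf, q_func, num_actions, epsilon, attack='fgsm'):
    # Hypothetical sketch, assuming attack == 'fgsm'.
    obs_input = make_obs_tf("adv_observation")
    obs = obs_input.get()
    q_values = q_func(obs, num_actions, scope="deepq/q_func")
    # Push the observation away from the currently greedy action (fast gradient sign method).
    greedy = tf.one_hot(tf.argmax(q_values, axis=1), num_actions)
    loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=greedy, logits=q_values)
    grad = tf.gradients(loss, obs)[0]
    adv_obs = tf.clip_by_value(obs + epsilon * tf.sign(grad), 0, 255)  # keep pixel range
    return U.function(inputs=[obs_input], outputs=adv_obs)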
Example No. 11
0
def train_dqn(opts,
              seed=None,
              lr=1e-3,
              total_timesteps=500000,
              buffer_size=50000,
              exploration_fraction=0.1,
              exploration_final_eps=0.02,
              train_freq=1,
              batch_size=32,
              checkpoint_freq=500000,
              learning_starts=1000,
              gamma=1.000,
              target_network_update_freq=3000,
              load_path=None):
    """
    Runs the main recorder by binding certain discrete actions to keys.
    """
    if os.path.exists(opts.model_dir):
        print('Path already exists. Remove? y for yes')
        input_char = getch.getch()
        if not input_char == 'y':
            print('Exiting')
            return
        shutil.rmtree(opts.model_dir)
    os.makedirs(opts.model_dir)
    os.makedirs(os.path.join(opts.model_dir, 'logs'))
    os.makedirs(os.path.join(opts.model_dir, 'weights'))

    #env = gym.make('MountainCar-v0')
    env = gym.make('LunarLander-v2')
    env._max_episode_steps = 1200

    sess = get_session()
    set_global_seeds(seed)

    train_writer = tf.summary.FileWriter(os.path.join(opts.model_dir, 'logs'),
                                         sess.graph)

    q_func = build_q_func('mlp')

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10)
    replay_buffer = ReplayBuffer(buffer_size)

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    obs = env.reset()

    for t in range(total_timesteps):
        # Take action and update exploration to the newest value
        env.render()
        update_eps = exploration.value(t)
        action = act(np.array(obs)[None], update_eps=update_eps)[0]
        new_obs, rew, done, _ = env.step(action)
        # Store transition in the replay buffer.
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = new_obs

        episode_rewards[-1] += rew
        if done:
            print("Exploration value: {}".format(exploration.value(t)))
            print("Last 25 episode rewards: {}".format(episode_rewards[-25:]))

            reward_summary = tf.Summary(value=[
                tf.Summary.Value(tag='reward',
                                 simple_value=episode_rewards[-1])
            ])
            train_writer.add_summary(reward_summary, t)

            obs = env.reset()
            episode_rewards.append(0.0)

        if t > learning_starts and t % train_freq == 0:
            # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
            obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                batch_size)
            weights, batch_idxes = np.ones_like(rewards), None
            td_errors, summary = train(obses_t, actions, rewards, obses_tp1,
                                       dones, weights)
            train_writer.add_summary(summary, t)

        if t > learning_starts and t % target_network_update_freq == 0:
            # Update target network periodically.
            update_target()

        if t > learning_starts and t % checkpoint_freq == 0:
            save_variables(
                os.path.join(opts.model_dir, 'weights', '{}.model'.format(t)))
    save_variables(os.path.join(opts.model_dir, 'weights', 'last.model'))
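A minimal sketch of calling train_dqn; the opts namespace below is an assumption about what the surrounding argument parser provides (only model_dir is read by the function above):

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', default='runs/lunarlander_dqn',
                        help='directory that will hold logs/ and weights/')
    opts = parser.parse_args()
    train_dqn(opts, seed=0, lr=1e-3, total_timesteps=500000)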
Example No. 12
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=3000,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=3000,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):


    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)


    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=lambda name: ObservationInput(env.observation_space, name=name),
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        # gamma=gamma,
        # grad_norm_clipping=10,
        # param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(100000),
                                 initial_p=1.0,
                                 final_p=0.02)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    old_state = None

    formula_LTLf_1 = "!F(die)"
    monitoring_RightToLeft = MonitoringSpecification(
        ltlf_formula=formula_LTLf_1,
        r=1,
        c=-10,
        s=1,
        f=-10
    )

    monitoring_specifications = [monitoring_RightToLeft]

    stepCounter = 0
    done = False

    def RightToLeftConversion(observation) -> TraceStep:
        print(stepCounter)
        # An episode that terminates before the 199-step limit counts as a "die" event.
        die = done and not (stepCounter >= 199)
        dictionary = {'die': die}
        print(dictionary)
        return dictionary

    multi_monitor = MultiRewardMonitor(
        monitoring_specifications=monitoring_specifications,
        obs_to_trace_step=RightToLeftConversion
    )

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True


    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episodeCounter=0
        num_episodes=0

        for t in itertools.count():
            
            # Take action and update exploration to the newest value
            action = act(obs[None], update_eps=exploration.value(t))[0]
            #print(action)
            new_obs, rew, done, _ = env.step(action)
            stepCounter+=1

            rew, is_perm = multi_monitor(new_obs)
            old_state = new_obs

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew


            is_solved = t > 100 and np.mean(episode_rewards[-101:-1]) >= 200
            if episodeCounter % 100 == 0 or episodeCounter<1:
                # Show off the result
                #print("coming here Again and Again")
                env.render()


            if done:
                episodeCounter+=1
                num_episodes+=1
                obs = env.reset()
                episode_rewards.append(0)
                multi_monitor.reset()
                stepCounter=0
            else:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if t > 1000:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
                    train(obses_t, actions, rewards, obses_tp1, dones, np.ones_like(rewards))

                # Update target network periodically.
                if t % 1000 == 0:
                    update_target()
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            if done and len(episode_rewards) % 10 == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", len(episode_rewards))
                logger.record_tabular("mean 100 episode reward", round(np.mean(episode_rewards[-101:-1]), 1))
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 500 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    act.save_act()
                    #save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        # if model_saved:
        #     if print_freq is not None:
        #         logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
        #     load_variables(model_file)

    return act
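MonitoringSpecification and MultiRewardMonitor come from an external LTLf monitoring (restraining-bolt style) library whose API is not shown in this excerpt. The toy stand-in below only illustrates the calling convention used in the loop above, namely reward, is_perm = monitor(new_obs) plus reset(); the +1 / -10 values mirror the r=1, c=-10 specification, everything else is an assumption:

class ToyDieMonitor:
    """Hypothetical stand-in: +1 per step while 'die' has not occurred, -10 when it does."""

    def __init__(self, obs_to_trace_step, step_reward=1, violation_cost=-10):
        self.obs_to_trace_step = obs_to_trace_step
        self.step_reward = step_reward
        self.violation_cost = violation_cost
        self.violated = False

    def __call__(self, observation):
        trace_step = self.obs_to_trace_step(observation)
        if trace_step.get('die'):
            self.violated = True
            return self.violation_cost, False  # (shaped reward, formula still satisfiable?)
        return self.step_reward, not self.violated

    def reset(self):
        self.violated = False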
Example No. 13
0
def learn(env,
          network,
          seed=None,
          lr=1e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          multiplayer=False,
          callback=None,
          load_path=None,
          load_path_1=None,
          load_path_2=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from in single-player mode. (default: None)
    multiplayer: bool
        if True, train two independent agents (one per player) in separate TensorFlow sessions
    load_path_1: str
        path to load the player 1 model from in multiplayer mode. (default: None)
    load_path_2: str
        path to load the player 2 model from in multiplayer mode. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """

    # This was all handled in not the most elegant way:
    # variables have a _1 or _2 suffix to separate the two players, and a number of
    # `if multiplayer` checks keep the _2 variables inactive in single-player mode.

    # when in multiplayer Space Invaders, need to not reward players for other player dying
    isSpaceInvaders = False
    if "SpaceInvaders" in str(env):
        isSpaceInvaders = True

    # put a limit on the amount of memory used, otherwise TensorFlow will consume nearly everything
    # this leaves 1 GB free on my computer, others may need to change it

    # Create all the functions necessary to train the model
    # Create two separate TensorFlow sessions
    graph_1 = tf.Graph()
    sess_1 = tf.Session(graph=graph_1)
    if multiplayer:
        graph_2 = tf.Graph()
        sess_2 = tf.Session(graph=graph_2)
    else:
        # set session 2 to None if it's not being used
        sess_2 = None
    set_global_seeds(seed)
    # specify the q functions as separate objects
    q_func_1 = build_q_func(network, **network_kwargs)
    if multiplayer:
        q_func_2 = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    # build everything for the first model
    # pass in the session and the "_1" suffix
    act_1, train_1, update_target_1, debug_1 = deepq.build_train(
        sess=sess_1,
        make_obs_ph=make_obs_ph,
        q_func=q_func_1,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        scope="deepq")
    # a lot of if multiplayer statements duplicating these actions for a second network
    # pass in session 2 and "_2" instead
    if multiplayer:
        act_2, train_2, update_target_2, debug_2 = deepq.build_train(
            sess=sess_2,
            make_obs_ph=make_obs_ph,
            q_func=q_func_2,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            gamma=gamma,
            grad_norm_clipping=10,
            param_noise=param_noise,
            scope="deepq")

    # separate act_params for each wrapper
    act_params_1 = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func_1,
        'num_actions': env.action_space.n,
    }
    if multiplayer:
        act_params_2 = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func_2,
            'num_actions': env.action_space.n,
        }
    # make the act wrappers
    act_1 = ActWrapper(act_1, act_params_1)
    if multiplayer:
        act_2 = ActWrapper(act_2, act_params_2)
    # I need to return something if it's single-player
    else:
        act_2 = None

    # Create the replay buffer
    # separate replay buffers are required for each network
    # this is required for competitive because the replay buffers hold rewards
    # and player 2 has different rewards than player 1
    if prioritized_replay:
        replay_buffer_1 = PrioritizedReplayBuffer(
            buffer_size, alpha=prioritized_replay_alpha)
        if multiplayer:
            replay_buffer_2 = PrioritizedReplayBuffer(
                buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer_1 = ReplayBuffer(buffer_size)
        if multiplayer:
            replay_buffer_2 = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    # initialize both sessions
    U.initialize(sess_1)
    if multiplayer:
        U.initialize(sess_2)
    # the session was passed into these functions when they were created
    # the separate update functions work within the different sessions
    update_target_1()
    if multiplayer:
        update_target_2()

    # keep track of rewards for both models separately
    episode_rewards_1 = [0.0]
    saved_mean_reward_1 = None
    if multiplayer:
        episode_rewards_2 = [0.0]
        saved_mean_reward_2 = None
    obs = env.reset()
    reset = True

    # storing stuff in a temporary directory while it's working
    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file_1 = os.path.join(td, "model_1")
        temp_file_1 = os.path.join(td, "temp_1")
        model_saved_1 = False
        if multiplayer:
            model_file_2 = os.path.join(td, "model_2")
            temp_file_2 = os.path.join(td, "temp_2")
            model_saved_2 = False

        if tf.train.latest_checkpoint(td) is not None:
            if multiplayer:
                # load both models if multiplayer is on
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(model_file_1))
                model_saved_1 = True
                load_variables(model_file_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(model_file_2))
                model_saved_2 = True
            # otherwise just load the first one
            else:
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model from {}'.format(model_file_1))
                model_saved_1 = True
        # I have separate load variables for single-player and multiplayer
        # this should be None if multiplayer is on
        elif load_path is not None:
            load_variables(load_path, sess_1)
            logger.log('Loaded model from {}'.format(load_path))
        # load the separate models in for multiplayer
        # should load the variables into the appropriate sessions

        # this format may only work properly when a Player 1 model is loaded into session 1
        # (and likewise for Player 2); in practice, the models will not behave correctly otherwise
        elif multiplayer:
            if load_path_1 is not None:
                load_variables(load_path_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(load_path_1))
            if load_path_2 is not None:
                load_variables(load_path_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(load_path_2))

        # actual training starts here
        for t in range(total_timesteps):
            # use this for updating purposes
            actual_t = t + 1
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            # receive model 1's action based on the model and observation
            action_1 = act_1(np.array(obs)[None],
                             update_eps=update_eps,
                             **kwargs)[0]
            env_action_1 = action_1
            # do the same for model 2 if in multiplayer
            if multiplayer:
                action_2 = act_2(np.array(obs)[None],
                                 update_eps=update_eps,
                                 **kwargs)[0]
                env_action_2 = action_2
            reset = False
            # apply actions to the environment
            if multiplayer:
                new_obs, rew_1, rew_2, done, _ = env.step(
                    env_action_1, env_action_2)
            # apply single action if there isn't a second model
            else:
                new_obs, rew_1, rew_2, done, _ = env.step(env_action_1)

            # manual clipping for Space Invaders multiplayer
            if isSpaceInvaders and multiplayer:
                # don't reward a player when the other player dies
                # change the reward to 0
                # the only time either player will get rewarded 200 is when the other player dies
                if rew_1 >= 200:
                    rew_1 = rew_1 - 200.0
                if rew_2 >= 200:
                    rew_2 = rew_2 - 200.0
                # manually clip the rewards using the sign function
                rew_1 = np.sign(rew_1)
                rew_2 = np.sign(rew_2)
                combo_factor = 0.25
                rew_1_combo = rew_1 + combo_factor * rew_2
                rew_2_combo = rew_2 + combo_factor * rew_1
                rew_1 = rew_1_combo
                rew_2 = rew_2_combo

            # Store transition in the replay buffers
            replay_buffer_1.add(obs, action_1, rew_1, new_obs, float(done))
            if multiplayer:
                # pass reward_2 to the second player
                # this reward will vary based on the game
                replay_buffer_2.add(obs, action_2, rew_2, new_obs, float(done))
            obs = new_obs
            # separate rewards for each model
            episode_rewards_1[-1] += rew_1
            if multiplayer:
                episode_rewards_2[-1] += rew_2
            if done:
                obs = env.reset()
                episode_rewards_1.append(0.0)
                if multiplayer:
                    episode_rewards_2.append(0.0)
                reset = True
            if actual_t > learning_starts and actual_t % train_freq == 0:

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                # sample from the two replay buffers
                if prioritized_replay:
                    experience_1 = replay_buffer_1.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1,
                     weights_1, batch_idxes_1) = experience_1
                    # keep all the variables with separate names
                    if multiplayer:
                        experience_2 = replay_buffer_2.sample(
                            batch_size, beta=beta_schedule.value(t))
                        (obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2,
                         weights_2, batch_idxes_2) = experience_2
                # do the same if there's no prioritization
                else:
                    obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1 = replay_buffer_1.sample(
                        batch_size)
                    weights_1, batch_idxes_1 = np.ones_like(rewards_1), None
                    if multiplayer:
                        obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2 = replay_buffer_2.sample(
                            batch_size)
                        weights_2, batch_idxes_2 = np.ones_like(
                            rewards_2), None
                # actually train the model based on the samples
                td_errors_1 = train_1(obses_t_1, actions_1, rewards_1,
                                      obses_tp1_1, dones_1, weights_1)
                if multiplayer:
                    td_errors_2 = train_2(obses_t_2, actions_2, rewards_2,
                                          obses_tp1_2, dones_2, weights_2)
                # give new priority weights to the observations
                if prioritized_replay:
                    new_priorities_1 = np.abs(
                        td_errors_1) + prioritized_replay_eps
                    replay_buffer_1.update_priorities(batch_idxes_1,
                                                      new_priorities_1)
                    if multiplayer:
                        new_priorities_2 = np.abs(
                            td_errors_2) + prioritized_replay_eps
                        replay_buffer_2.update_priorities(
                            batch_idxes_2, new_priorities_2)

            if actual_t > learning_starts and actual_t % target_network_update_freq == 0:
                # Update target networks periodically.
                update_target_1()
                if multiplayer:
                    update_target_2()

            # this section is for the purposes of logging stuff
            # calculate the average reward over the last 100 episodes
            mean_100ep_reward_1 = round(np.mean(episode_rewards_1[-101:-1]), 1)
            if multiplayer:
                mean_100ep_reward_2 = round(
                    np.mean(episode_rewards_2[-101:-1]), 1)
            num_episodes = len(episode_rewards_1)
            # every given number of episodes log and print out the appropriate stuff
            if done and print_freq is not None and len(
                    episode_rewards_1) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                # print out both rewards if multiplayer
                if multiplayer:
                    logger.record_tabular("mean 100 episode reward 1",
                                          mean_100ep_reward_1)
                    logger.record_tabular("mean 100 episode reward 2",
                                          mean_100ep_reward_2)
                else:
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward_1)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            # save best-performing version of each model
            # I've opted out of this for competitive multiplayer because it's difficult to determine what's "best"

            if (checkpoint_freq is not None and actual_t > learning_starts
                    and num_episodes > 100
                    and actual_t % checkpoint_freq == 0):
                # if there's a best reward, save it as the new best model
                if saved_mean_reward_1 is None or mean_100ep_reward_1 > saved_mean_reward_1:
                    if print_freq is not None:
                        if multiplayer:
                            logger.log(
                                "Saving model 1 due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                        else:
                            logger.log(
                                "Saving model due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                    save_variables(model_file_1, sess_1)
                    model_saved_1 = True
                    saved_mean_reward_1 = mean_100ep_reward_1

                if multiplayer and (saved_mean_reward_2 is None or
                                    mean_100ep_reward_2 > saved_mean_reward_2):
                    if print_freq is not None:
                        logger.log(
                            "Saving model 2 due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward_2, mean_100ep_reward_2))
                    save_variables(model_file_2, sess_2)
                    model_saved_2 = True
                    saved_mean_reward_2 = mean_100ep_reward_2

        # restore models at the end to the best performers
        if model_saved_1:
            if print_freq is not None:
                logger.log("Restored model 1 with mean reward: {}".format(
                    saved_mean_reward_1))
            load_variables(model_file_1, sess_1)
        if multiplayer and model_saved_2:
            if print_freq is not None:
                logger.log("Restored model 2 with mean reward: {}".format(
                    saved_mean_reward_2))
            load_variables(model_file_2, sess_2)
    return act_1, act_2, sess_1, sess_2
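A minimal sketch of invoking the two-player variant above; the environment is assumed to be a wrapper whose step() returns (obs, rew_1, rew_2, done, info), as the training loop expects, and its construction is omitted here:

act_1, act_2, sess_1, sess_2 = learn(
    env,                      # two-player wrapper, e.g. around SpaceInvaders
    network='conv_only',
    multiplayer=True,
    total_timesteps=1000000,
    load_path_1=None,         # optionally resume player 1 from a checkpoint
    load_path_2=None,         # optionally resume player 2 from a checkpoint
)
act_1.save_act(path='player_1.pkl')  # each ActWrapper can be saved separately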
Example No. 14
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
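    # For example, with total_timesteps=100000 and exploration_fraction=0.1, epsilon would be
    # annealed linearly from 1.0 down to exploration_final_eps over the first 10000 steps and
    # then held constant for the rest of training.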

    
    ############################## RL-S Prepare #############################################
    
    # model saved name
    saved_name = "0817"

    #####
    # Setup Training Record
    #####
    save_new_data = False
    create_new_file = False
    create_new_file_rule = create_new_file
    save_new_data_rule = save_new_data

    create_new_file_RL = False
    save_new_data_RL = save_new_data
    
    create_new_file_replay_buffer = False
    save_new_data_replay_buffer = save_new_data

    is_training = False
    trajectory_buffer = deque(maxlen=20)

    if create_new_file_replay_buffer:
        if osp.exists("recorded_replay_buffer.txt"):
            os.remove("recorded_replay_buffer.txt")
    else:
        replay_buffer_dataset = np.loadtxt("recorded_replay_buffer.txt")
        for data in replay_buffer_dataset:
            obs, action, rew, new_obs, done = _extract_data(data)
            replay_buffer.add(obs, action, rew, new_obs, done)

    recorded_replay_buffer_outfile = open("recorded_replay_buffer.txt","a")
    recorded_replay_buffer_format = " ".join(("%f",)*31)+"\n"
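    # Each line of the replay-buffer file holds 31 floats: presumably the 14-dim observation,
    # the action, the reward, the 14-dim next observation and the done flag (14+1+1+14+1 = 31),
    # matching whatever layout _wrap_data/_extract_data use.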
    
    #####
    # Setup Rule-based Record
    #####
    create_new_file_rule = True

    # create state database
    if create_new_file_rule:
        if osp.exists("state_index_rule.dat"):
            os.remove("state_index_rule.dat")
            os.remove("state_index_rule.idx")
        if osp.exists("visited_state_rule.txt"):
            os.remove("visited_state_rule.txt")
        if osp.exists("visited_value_rule.txt"):
            os.remove("visited_value_rule.txt")

        visited_state_rule_value = []
        visited_state_rule_counter = 0
    else:
        visited_state_rule_value = np.loadtxt("visited_value_rule.txt")
        visited_state_rule_value = visited_state_rule_value.tolist()
        visited_state_rule_counter = len(visited_state_rule_value)

    visited_state_rule_outfile = open("visited_state_rule.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_rule_outfile = open("visited_value_rule.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_rule_tree = rindex.Index('state_index_rule',properties=visited_state_tree_prop)
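    # Each visited state is inserted into the 14-D R-tree as the axis-aligned box
    # (state - visited_state_dist, state + visited_state_dist), so later intersection queries
    # can retrieve previously visited states that lie within the per-dimension tolerance
    # (presumably what the commented-out generate_RLS_action arbitration relies on).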

    #####
    # Setup RL-based Record
    #####

    if create_new_file_RL:
        if osp.exists("state_index_RL.dat"):
            os.remove("state_index_RL.dat")
            os.remove("state_index_RL.idx")
        if osp.exists("visited_state_RL.txt"):
            os.remove("visited_state_RL.txt")
        if osp.exists("visited_value_RL.txt"):
            os.remove("visited_value_RL.txt")

    if create_new_file_RL:
        visited_state_RL_value = []
        visited_state_RL_counter = 0
    else:
        visited_state_RL_value = np.loadtxt("visited_value_RL.txt")
        visited_state_RL_value = visited_state_RL_value.tolist()
        visited_state_RL_counter = len(visited_state_RL_value)

    visited_state_RL_outfile = open("visited_state_RL.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_RL_outfile = open("visited_value_RL.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_RL_tree = rindex.Index('state_index_RL',properties=visited_state_tree_prop)


    ############################## RL-S Prepare End #############################################
    
    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
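                # For example, with exploration.value(t) = 0.1 and 4 actions this gives
                # -log(1 - 0.1 + 0.1/4) = -log(0.925) ≈ 0.078.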
            action, q_function_cz = act(np.array(obs)[None], update_eps=update_eps, **kwargs)
            
            # RLS_action = generate_RLS_action(obs,q_function_cz,action,visited_state_rule_value,
            #                                 visited_state_rule_tree,visited_state_RL_value,
            #                                 visited_state_RL_tree,is_training)

            RLS_action = 0
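            # NOTE: the RL-S arbitration (generate_RLS_action) is disabled above, so the
            # environment is always stepped with action 0, while the network's `action` is
            # still what gets stored in the trajectory buffer below.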

            env_action = RLS_action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)

            ########### Record data in trajectory buffer and local file, but not in replay buffer ###########

            trajectory_buffer.append((obs, action, float(rew), new_obs, float(done)))

            # Store transition in the replay buffer.
            # replay_buffer.add(obs, action, rew, new_obs, float(done))

            obs = new_obs
            episode_rewards[-1] += rew # safe driving is 1, collision is 0


            while len(trajectory_buffer)>10:
                # if the agent has driven safely for 10 more steps (configurable), the oldest state is regarded as safe
                obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                # save this state in local replay buffer file
                if save_new_data_replay_buffer:
                    recorded_data = _wrap_data(obs_left, action_left, rew_left, new_obs_left, done_left)
                    recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                # put this state in replay buffer
                replay_buffer.add(obs_left[0], action_left, float(rew_left), new_obs_left[0], float(done_left))
                action_to_record = action_left
                r_to_record = rew_left
                obs_to_record = obs_left

                # save this state in rule-based or RL-based visited state
                if action_left == 0:
                    if save_new_data_rule:
                        visited_state_rule_value.append([action_to_record,r_to_record])
                        visited_state_rule_tree.insert(visited_state_rule_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_rule_counter += 1
                else:
                    if save_new_data_RL:
                        visited_state_RL_value.append([action_to_record,r_to_record])
                        visited_state_RL_tree.insert(visited_state_RL_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_RL_counter += 1

            ################# Record data end ########################
            
            
            if done:
                """ 
                Get collision or out of multilane map
                """
                ####### Record the trajectory data and add data in replay buffer #########
                _, _, rew_right, _, _ = trajectory_buffer[-1]

                while len(trajectory_buffer)>0:
                    obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                    action_to_record = action_left
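                    # Blend the episode's terminal reward back along the remaining trajectory:
                    # transitions popped closer to the terminal event (fewer items left in the
                    # buffer) are pulled toward rew_right, while earlier ones keep more of their
                    # own rew_left; with gamma = 1.0 every remaining transition receives rew_right.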
                    r_to_record = (rew_right-rew_left)*gamma**len(trajectory_buffer) + rew_left
                    # record in local replay buffer file
                    if save_new_data_replay_buffer:
                        obs_to_record = obs_left
                        recorded_data = _wrap_data(obs_left, action_left, r_to_record, new_obs_left, done_left)
                        recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                    # record in replay buffer for training
                    replay_buffer.add(obs_left[0], action_left, float(r_to_record), new_obs_left[0], float(done_left))

                    # save visited rule/RL state data in local file
                    if action_left == 0:
                        if save_new_data_rule:
                            visited_state_rule_value.append([action_to_record,r_to_record])
                            visited_state_rule_tree.insert(visited_state_rule_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_rule_counter += 1
                    else:
                        if save_new_data_RL:
                            visited_state_RL_value.append([action_to_record,r_to_record])
                            visited_state_RL_tree.insert(visited_state_RL_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_RL_counter += 1

                ####### Recorded #####

                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            ############### Training Part Start #####################
            if not is_training:
                # don't need to train the model
                continue

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

                    rew_str = str(mean_100ep_reward)
                    path = osp.expanduser("~/models/carlaok_checkpoint/"+saved_name+"_"+rew_str)
                    act.save(path)

        #### close the files ####
        visited_state_rule_outfile.close()
        visited_value_rule_outfile.close()
        visited_state_RL_outfile.close()
        visited_value_RL_outfile.close()
        recorded_replay_buffer_outfile.close()
        #### close the files ####

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
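The trajectory-buffer bookkeeping above can be hard to follow inline. The following standalone sketch (the name relabel_trajectory and the gamma/safe_horizon values are hypothetical, not part of the example) shows the same idea: transitions that survive a safety horizon keep their original reward, and when the episode ends the terminal reward is blended back along the remaining trajectory with the same (rew_end - rew) * gamma**k + rew rule.

from collections import deque

def relabel_trajectory(transitions, gamma=0.95, safe_horizon=10):
    """transitions: list of (obs, action, rew, new_obs, done) tuples for one episode."""
    buffer, out = deque(), []
    for tr in transitions:
        buffer.append(tr)
        while len(buffer) > safe_horizon:
            # this transition survived `safe_horizon` further steps: keep its original reward
            out.append(buffer.popleft())
    if transitions and transitions[-1][4]:  # episode ended with done=True
        rew_end = buffer[-1][2]
        while buffer:
            obs, act, rew, new_obs, done = buffer.popleft()
            # the fewer transitions left in the buffer, the closer the value is pulled to rew_end
            rew = (rew_end - rew) * gamma ** len(buffer) + rew
            out.append((obs, act, rew, new_obs, done))
    return out

Note that with gamma=1.0, the example's default, the blending collapses and every remaining transition simply receives the terminal reward.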
Example No. 15
0
def init_wrapper(env,
                 network_type,
                 lr=1e-4,
                 gamma=1.0,
                 param_noise=True,
                 buffer_size=int(1e5),
                 prioritized_replay_alpha=.6,
                 prioritized_replay=True,
                 prioritized_replay_beta_iters=None,
                 prioritized_replay_beta=.4,
                 exploration_fraction=.1,
                 grad_norm_clipping=10,
                 total_timesteps=int(1e6),
                 exploration_final_eps=0.02,
                 **network_kwargs):
    # decomposes the baselines deepq learn() into initialization and inference components
    # largely copied from the baselines deepq implementation

    # see the baselines repo for concise parameter documentation

    q_func = build_q_func(network_type, **network_kwargs)

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=grad_norm_clipping,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer

    # WARNING: do not use an internal replay buffer; use the baselines one only,
    # for stability reasons

    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    # return hashed objects
    return {
        'train_function': train,
        'act_function': act,
        'replay_buffer': replay_buffer,
        'update_target_function': update_target,
        'exploration_scheme': exploration,
        'beta_schedule': beta_schedule
    }
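A hedged usage sketch for the handles returned above (my_env, the step counts and the update frequencies are hypothetical; it assumes the same imports as the example, e.g. numpy as np, and passes param_noise=False so that plain epsilon-greedy exploration applies):

handles = init_wrapper(my_env, 'mlp', param_noise=False, prioritized_replay=False)
replay_buffer = handles['replay_buffer']
obs = my_env.reset()
for t in range(10000):
    eps = handles['exploration_scheme'].value(t)
    action = handles['act_function'](np.array(obs)[None], update_eps=eps)[0]
    new_obs, rew, done, _ = my_env.step(action)
    replay_buffer.add(obs, action, rew, new_obs, float(done))
    obs = my_env.reset() if done else new_obs
    if t > 1000 and t % 4 == 0:
        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(32)
        weights = np.ones_like(rewards)
        handles['train_function'](obses_t, actions, rewards, obses_tp1, dones, weights)
    if t % 500 == 0:
        handles['update_target_function']()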
Example No. 16
0
def learn(env,
          network,
          seed=None,
          lr=1e-3,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          num_cpu=5,
          callback=None,
          scope='co_deepq',
          pilot_tol=0,
          pilot_is_human=False,
          reuse=False,
          load_path=None,
          **network_kwargs):
    # Create all the functions necessary to train the model

    sess = get_session()  #tf.Session(graph=tf.Graph())
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    using_control_sharing = True  #pilot_tol > 0

    if pilot_is_human:
        utils.human_agent_action = init_human_action()
        utils.human_agent_active = False

    act, train, update_target, debug = co_build_train(
        scope=scope,
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        reuse=tf.AUTO_REUSE if reuse else False,
        using_control_sharing=using_control_sharing)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    episode_outcomes = []
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    prev_t = 0
    rollouts = []

    if not using_control_sharing:
        exploration = LinearSchedule(schedule_timesteps=int(
            exploration_fraction * total_timesteps),
                                     initial_p=1.0,
                                     final_p=exploration_final_eps)

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            masked_obs = mask_helipad(obs)

            act_kwargs = {}
            if using_control_sharing:
                if pilot_is_human:
                    act_kwargs['pilot_action'] = env.unwrapped.pilot_policy(
                        obs[None, :9])
                else:
                    act_kwargs[
                        'pilot_action'] = env.unwrapped.pilot_policy.step(
                            obs[None, :9])
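                # Only honour the pilot tolerance while a pilot is actually providing input:
                # with a human pilot that is currently inactive, pilot_tol falls back to 0.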
                act_kwargs['pilot_tol'] = pilot_tol if not pilot_is_human or (
                    pilot_is_human and utils.human_agent_active) else 0
            else:
                act_kwargs['update_eps'] = exploration.value(t)

            #action = act(masked_obs[None, :], **act_kwargs)[0][0]
            action = act(np.array(masked_obs)[None], **act_kwargs)[0][0]
            env_action = action
            reset = False
            new_obs, rew, done, info = env.step(env_action)

            if pilot_is_human:
                env.render()

            # Store transition in the replay buffer.
            masked_new_obs = mask_helipad(new_obs)
            replay_buffer.add(masked_obs, action, rew, masked_new_obs,
                              float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                episode_outcomes.append(rew)
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

                if pilot_is_human:
                    utils.human_agent_action = init_human_action()
                    utils.human_agent_active = False
                    time.sleep(2)

            if t > learning_starts and t % train_freq == 0:
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
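            # Episode outcomes are compared against the +/-100 terminal rewards
            # (presumably LunarLander-style success/crash signals) to get rolling
            # success and crash rates over the last 100 episodes.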
            mean_100ep_succ = round(
                np.mean(
                    [1 if x == 100 else 0 for x in episode_outcomes[-101:-1]]),
                2)
            mean_100ep_crash = round(
                np.mean([
                    1 if x == -100 else 0 for x in episode_outcomes[-101:-1]
                ]), 2)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("mean 100 episode succ", mean_100ep_succ)
                logger.record_tabular("mean 100 episode crash",
                                      mean_100ep_crash)
                logger.dump_tabular()

            if checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0 and (
                    saved_mean_reward is None
                    or mean_100ep_reward > saved_mean_reward):
                if print_freq is not None:
                    logger.log(
                        "Saving model due to mean reward increase: {} -> {}".
                        format(saved_mean_reward, mean_100ep_reward))
                save_variables(model_file)
                model_saved = True
                saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    reward_data = {'rewards': episode_rewards, 'outcomes': episode_outcomes}

    return act, reward_data
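A hedged usage sketch of the co-learning variant above (the environment and pilot settings are hypothetical; it assumes an env that exposes env.unwrapped.pilot_policy as the example expects):

act, reward_data = learn(env,
                         network='mlp',
                         total_timesteps=50000,
                         pilot_tol=0.1,
                         pilot_is_human=False,
                         scope='co_deepq')
print('episodes:', len(reward_data['rewards']))
print('recent outcomes:', reward_data['outcomes'][-10:])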