Example #1
    def load_act(path):
        with open(path, "rb") as f:
            model_data, act_params = cloudpickle.load(f)
        act = deepq.build_act(**act_params)
        sess = tf.Session()
        sess.__enter__()
        with tempfile.TemporaryDirectory() as td:
            arc_path = os.path.join(td, "packed.zip")
            with open(arc_path, "wb") as f:
                f.write(model_data)

            zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
            load_variables(os.path.join(td, "model"))

        return ActWrapper(act, act_params)
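
For context, a minimal usage sketch of the loader above, assuming the pickle was produced by ActWrapper.save from baselines.deepq (the file name and environment choice are illustrative):

import gym

act = load_act("cartpole_model.pkl")  # hypothetical path to a saved ActWrapper
env = gym.make("CartPole-v0")
obs, done = env.reset(), False
while not done:
    obs, rew, done, _ = env.step(act(obs[None])[0])
env.close()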
Example #2
 def load(self, path):
     U.load_variables(path, sess=self.sess)
Example #3
def learn(env,
          network,
          seed=None,
          pool=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_initial_eps=1.0,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=1,
          checkpoint_freq=100,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          experiment_name='unnamed',
          load_path=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_initial_eps: float
        initial value of random action probability
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    experiment_name: str
        name of the experiment (default: 'unnamed')
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=exploration_initial_eps,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    reward_shaper = ActionAdviceRewardShaper('../completed-observations')
    reward_shaper.load()

    full_exp_name = '{}-{}'.format(date.today().isoformat(), experiment_name)
    experiment_dir = os.path.join('experiments', full_exp_name)
    if not os.path.exists(experiment_dir):
        os.makedirs(experiment_dir)

    summary_dir = os.path.join(experiment_dir, 'summaries')
    os.makedirs(summary_dir, exist_ok=True)
    summary_writer = tf.summary.FileWriter(summary_dir)

    checkpoint_dir = os.path.join(experiment_dir, 'checkpoints')
    os.makedirs(checkpoint_dir, exist_ok=True)

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_dir or td

        os.makedirs(td, exist_ok=True)
        model_file = os.path.join(td, "best_model")
        model_saved = False
        saved_mean_reward = None

        if os.path.exists(model_file):
            print('Loading model')
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        episode_rewards = []
        update_step_t = 0
        while update_step_t < total_timesteps:
            # Reset the environment
            obs = env.reset()
            obs = StatePreprocessor.process(obs)
            episode_rewards.append(0.0)
            reset = True
            done = False
            # Sample the episode until it is completed
            act_step_t = update_step_t
            while not done:
                if callback is not None:
                    if callback(locals(), globals()):
                        break
                # Take action and update exploration to the newest value
                kwargs = {}
                if not param_noise:
                    update_eps = exploration.value(act_step_t)
                    update_param_noise_threshold = 0.
                else:
                    update_eps = 0.
                    # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                    # policy is comparable to eps-greedy exploration with eps = exploration.value(act_step_t).
                    # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                    # for detailed explanation.
                    update_param_noise_threshold = -np.log(
                        1. - exploration.value(act_step_t) +
                        exploration.value(act_step_t) /
                        float(env.action_space.n))
                    kwargs['reset'] = reset
                    kwargs[
                        'update_param_noise_threshold'] = update_param_noise_threshold
                    kwargs['update_param_noise_scale'] = True
                biases = reward_shaper.get_action_potentials(obs)
                action = act(np.array(obs)[None],
                             biases,
                             update_eps=update_eps,
                             **kwargs)[0]
                reset = False

                pairs = env.step(action)
                action, (new_obs, rew, done, _) = pairs[-1]
                # Write down the real reward but learn from normalized version
                episode_rewards[-1] += rew
                rew = np.sign(rew) * np.log(1 + np.abs(rew))
                new_obs = StatePreprocessor.process(new_obs)

                logger.log('{}/{} obs {} action {}'.format(
                    act_step_t, total_timesteps, obs, action))
                act_step_t += 1
                if len(new_obs) == 0:
                    done = True
                else:
                    replay_buffer.add(obs, action, rew, new_obs, float(done))
                    obs = new_obs
            # Post episode logging
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="rewards",
                                 simple_value=episode_rewards[-1])
            ])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(
                value=[tf.Summary.Value(tag="eps", simple_value=update_eps)])
            summary_writer.add_summary(summary, act_step_t)
            summary = tf.Summary(value=[
                tf.Summary.Value(tag="episode_steps",
                                 simple_value=act_step_t - update_step_t)
            ])
            summary_writer.add_summary(summary, act_step_t)
            mean_5ep_reward = round(np.mean(episode_rewards[-5:]), 1)
            num_episodes = len(episode_rewards)
            if print_freq is not None and num_episodes % print_freq == 0:
                logger.record_tabular("steps", act_step_t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 5 episode reward", mean_5ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(act_step_t)))
                logger.dump_tabular()
            # Do the learning
            start = time.time()
            while update_step_t < min(act_step_t, total_timesteps):
                if update_step_t % train_freq == 0:
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if prioritized_replay:
                        experience = replay_buffer.sample(
                            batch_size,
                            beta=beta_schedule.value(update_step_t))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                            batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    biases_t = pool.map(reward_shaper.get_action_potentials,
                                        obses_t)
                    biases_tp1 = pool.map(reward_shaper.get_action_potentials,
                                          obses_tp1)
                    td_errors, weighted_error = train(obses_t, biases_t,
                                                      actions, rewards,
                                                      obses_tp1, biases_tp1,
                                                      dones, weights)

                    # Loss logging
                    summary = tf.Summary(value=[
                        tf.Summary.Value(tag='weighted_error',
                                         simple_value=weighted_error)
                    ])
                    summary_writer.add_summary(summary, update_step_t)

                    if prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + prioritized_replay_eps
                        replay_buffer.update_priorities(
                            batch_idxes, new_priorities)
                if update_step_t % target_network_update_freq == 0:
                    # Update target network periodically.
                    update_target()
                update_step_t += 1
            stop = time.time()
            logger.log("Learning took {:.2f} seconds".format(stop - start))
            if checkpoint_freq is not None and num_episodes % checkpoint_freq == 0:
                # Periodically save the model and the replay buffer
                rec_model_file = os.path.join(
                    td, "model_{}_{:.2f}".format(num_episodes,
                                                 mean_5ep_reward))
                save_variables(rec_model_file)
                buffer_file = os.path.join(
                    td, "buffer_{}_{}".format(num_episodes, update_step_t))
                with open(buffer_file, 'wb') as foutput:
                    cloudpickle.dump(replay_buffer, foutput)
                # Check whether it is best
                if saved_mean_reward is None or mean_5ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_5ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_5ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act
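
A hedged invocation sketch for this modified learn(): unlike stock baselines, it requires a worker pool (used to batch reward_shaper.get_action_potentials calls) and an environment whose step() returns a list of (action, transition) pairs, so everything below (env, network name, timestep budget, output path) is illustrative only:

from multiprocessing import Pool

with Pool(processes=4) as pool:
    act = learn(env,                       # env assumed to follow the custom step() protocol above
                network='mlp',
                pool=pool,
                total_timesteps=100000,
                experiment_name='shaped-dqn-demo')
    act.save('shaped_dqn.pkl')             # ActWrapper.save; hypothetical output path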
Example #4
def learn(env,
          policy_func,
          reward_giver,
          expert_dataset,
          rank,
          pretrained,
          pretrained_weight,
          *,
          g_step,
          d_step,
          entcoeff,
          save_per_iter,
          ckpt_dir,
          log_dir,
          timesteps_per_batch,
          task_name,
          gamma,
          lam,
          max_kl,
          cg_iters,
          cg_damping=1e-2,
          vf_stepsize=1e-4,
          d_stepsize=1e-4,
          vf_iters=3,
          max_timesteps=0,
          max_episodes=0,
          max_iters=0,
          callback=None):

    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi",
                     ob_space,
                     ac_space,
                     reuse=(pretrained_weight != None))  #
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    #ob = U.get_placeholder_cached(name="ob")
    ob_config = U.get_placeholder_cached(name="ob")
    ob_target = U.get_placeholder_cached(name="goal")
    obs_pos = U.get_placeholder_cached(name="obs_pos")
    obs_ori = U.get_placeholder_cached(name="obs_ori")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = entcoeff * meanent

    vferr = tf.reduce_mean(tf.square(pi.vpred - ret))

    ratio = tf.exp(pi.pd.logp(ac) -
                   oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = tf.reduce_mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list if v.name.startswith("pi/pol")
        or v.name.startswith("pi/logstd") or v.name.startswith("pi/obs")
    ]
    vf_var_list = [
        v for v in all_var_list
        if v.name.startswith("pi/vf") or v.name.startswith("pi/obs")
    ]
    # assert len(var_list) == len(vf_var_list) + 1
    d_adam = MpiAdam(reward_giver.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32,
                                  shape=[None],
                                  name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([
        tf.reduce_sum(g * tangent)
        for (g, tangent) in zipsame(klgrads, tangents)
    ])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv,
                 newv) in zipsame(oldpi.get_variables(), pi.get_variables())
        ])
    compute_losses = U.function(
        [ob_config, ob_target, obs_pos, obs_ori, ac, atarg], losses)
    compute_lossandgrad = U.function(
        [ob_config, ob_target, obs_pos, obs_ori, ac, atarg],
        losses + [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function(
        [flat_tangent, ob_config, ob_target, obs_pos, obs_ori, ac, atarg], fvp)
    compute_vflossandgrad = U.function(
        [ob_config, ob_target, obs_pos, obs_ori, ret],
        U.flatgrad(vferr, vf_var_list))

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    if rank == 0:
        print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi,
                                     env,
                                     reward_giver,
                                     timesteps_per_batch,
                                     stochastic=True)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=40)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    g_loss_stats = stats(loss_names)
    d_loss_stats = stats(reward_giver.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if a pretrained weight is provided
    if pretrained_weight is not None:
        U.load_variables(pretrained_weight, variables=pi.get_variables())
    th_afterbc = get_flat()
    print("param sum after bc", th_afterbc.sum(), flush=True)
    while True:
        if callback: callback(locals(), globals())
        if max_timesteps and timesteps_so_far >= max_timesteps:
            break
        elif max_episodes and episodes_so_far >= max_episodes:
            break
        elif max_iters and iters_so_far >= max_iters:
            break

        # Save model
        if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None:
            fname = os.path.join(ckpt_dir, task_name)
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            saver = tf.train.Saver()
            saver.save(tf.get_default_session(), fname)

        logger.log("********** Iteration %i ************" % iters_so_far)

        def fisher_vector_product(p):
            v1 = allmean(compute_fvp(p, *fvpargs))
            # print("norm(v1):%.2e, norm(p):%.2e, cg_damping:%.2e"%(np.linalg.norm(v1), np.linalg.norm(p), cg_damping))
            return v1 + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        for _ in range(g_step):
            with timed("sampling"):
                seg = seg_gen.__next__()
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg[
                "tdlamret"]
            vpredbefore = seg[
                "vpred"]  # predicted value function before update
            atarg = (atarg - atarg.mean()) / atarg.std(
            )  # standardized advantage function estimate

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)  # update running mean/std for policy
            config, goal, obstacle_pos, obstacle_ori = [], [], [], []
            for o in seg["ob"]:
                config.append(o["joint"])
                goal.append(o["target"])
                obstacle_pos.append(o["obstacle_pos"])
                obstacle_ori.append(o["obstacle_ori"])
            config, goal, obstacle_pos, obstacle_ori = map(
                np.array, [config, goal, obstacle_pos, obstacle_ori])
            args = config, goal, obstacle_pos, obstacle_ori, seg["ac"], atarg
            fvpargs = [arr[::5] for arr in args]

            assign_old_eq_new(
            )  # set old parameter values to new parameter values
            with timed("computegrad"):
                *lossbefore, g = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log("Got zero gradient. not updating")
            else:
                with timed("cg"):
                    '''stepdir0 = cg(fisher_vector_product, g, cg_iters=15, verbose=rank == 0)
                    print('iter:10, norm of g: {:.4f}, error of cg: {:.4f}'.format(np.linalg.norm(g), np.linalg.norm(
                        g - compute_fvp(stepdir0, *fvpargs))))'''
                    stepdir = cg(fisher_vector_product,
                                 g,
                                 cg_iters=cg_iters,
                                 verbose=rank == 0)
                    print('iter:{:d}, norm of g: {:.4f}, error of cg: {:.4f}'.
                          format(
                              cg_iters, np.linalg.norm(g),
                              np.linalg.norm(g -
                                             compute_fvp(stepdir, *fvpargs))))
                    '''stepdir2 = cg(fisher_vector_product, g, cg_iters=200, verbose=rank == 0)
                    print('iter:200, norm of g: {:.4f}, error of cg: {:.4f}'.format(np.linalg.norm(g), np.linalg.norm(
                        g - compute_fvp(stepdir2, *fvpargs))))'''
                assert np.isfinite(stepdir).all()

                shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                lm = np.sqrt(shs / max_kl)
                # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
                fullstep = stepdir / lm
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                for _ in range(10):
                    thnew = thbefore + fullstep * stepsize
                    set_from_flat(thnew)
                    meanlosses = surr, kl, *_ = allmean(
                        np.array(compute_losses(*args)))
                    improve = surr - surrbefore
                    logger.log("Expected: %.3f Actual: %.3f" %
                               (expectedimprove, improve))
                    if not np.isfinite(meanlosses).all():
                        logger.log("Got non-finite value of losses -- bad!")
                    elif kl > max_kl * 1.5:
                        logger.log("violated KL constraint. shrinking step.")
                    elif improve < 0:
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log("Stepsize OK!")
                        break
                    stepsize *= .5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if nworkers > 1 and iters_so_far % 20 == 0:
                    paramsums = MPI.COMM_WORLD.allgather(
                        (thnew.sum(),
                         vfadam.getflat().sum()))  # list of tuples
                    assert all(
                        np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
            with timed("vf"):
                for _ in range(vf_iters):
                    for (mbob, mbg, mbop, mboo, mbret) in dataset.iterbatches(
                        (config, goal, obstacle_pos, obstacle_ori,
                         seg["tdlamret"]),
                            include_final_partial_batch=False,
                            batch_size=128):
                        if hasattr(pi, "ob_rms"):
                            pi.ob_rms.update(
                                mbob)  # update running mean/std for policy
                        g = allmean(
                            compute_vflossandgrad(mbob, mbg, mbop, mboo,
                                                  mbret))
                        vfadam.update(g, vf_stepsize)

        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))
        #mean = pi.pd.mean.eval()
        #print(mean)
        # ------------------ Update D ------------------
        logger.log("Optimizing Discriminator...")
        logger.log(fmt_row(13, reward_giver.loss_name))
        ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob))
        batch_size = len(ob) // d_step
        d_losses = [
        ]  # list of tuples, each of which gives the loss for a minibatch
        dof = env.env.env.dof
        for ob_batch, goal_batch, obs_pos_batch, obs_ori_batch, ac_batch in dataset.iterbatches(
            (config, goal, obstacle_pos, obstacle_ori, ac),
                include_final_partial_batch=False,
                batch_size=batch_size):
            ob_expert, ac_expert = expert_dataset.get_next_batch(len(ob_batch))
            # update running mean/std for reward_giver
            if hasattr(reward_giver, "obs_rms"):
                reward_giver.obs_rms.update(
                    np.concatenate((ob_batch, ob_expert), 0))
            *newlosses, g = reward_giver.lossandgrad(
                ob_batch, goal_batch, obs_pos_batch, obs_ori_batch, ac_batch,
                ob_expert[:, :dof], ob_expert[:, dof:2 * dof],
                ob_expert[:, -6:-3], ob_expert[:, -3:], ac_expert)
            d_adam.update(allmean(g), d_stepsize)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))

        lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"]
                   )  # local values
        listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
        true_rewbuffer.extend(true_rets)
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpTrueRewMean", np.mean(true_rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()
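
For reference, cg(fisher_vector_product, g, cg_iters=cg_iters) above approximately solves the linear system F x = g using only Fisher-vector products, never forming F itself. A minimal sketch of such a conjugate-gradient solver (baselines.common.cg provides the implementation actually imported here):

import numpy as np

def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
    # Solve A x = b given only a callable computing A @ p.
    x = np.zeros_like(b)
    r = b.copy()            # residual b - A @ x, with x = 0 initially
    p = r.copy()            # current search direction
    rdotr = r.dot(r)
    for _ in range(cg_iters):
        Ap = f_Ax(p)
        alpha = rdotr / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = r.dot(r)
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
        if rdotr < residual_tol:
            break
    return x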
Example #5
def learn(
    *,
    env_type,
    env,
    eval_env,
    plotter_env,
    total_timesteps,
    num_cpu,
    allow_run_as_root,
    bind_to_core,
    seed=None,
    save_interval=5,
    clip_return=True,
    override_params=None,
    load_path=None,
    save_path=None,
    policy_pkl=None,
):

    rank = MPI.COMM_WORLD.Get_rank()
    logger.info('before mpi_fork: rank', rank, 'num_cpu',
                MPI.COMM_WORLD.Get_size())

    if num_cpu > 1:
        if allow_run_as_root:
            whoami = mpi_fork_run_as_root(num_cpu, bind_to_core=bind_to_core)
        else:
            whoami = mpi_fork(num_cpu, bind_to_core=bind_to_core)
        if whoami == 'parent':
            logger.info('parent exiting with code 0...')
            sys.exit(0)

        U.single_threaded_session().__enter__()

    rank = MPI.COMM_WORLD.Get_rank()
    num_cpu = MPI.COMM_WORLD.Get_size()
    logger.info('after mpi_fork: rank', rank, 'num_cpu', num_cpu)

    override_params = override_params or {}

    # Seed everything.
    rank_seed = seed + 1000000 * rank if seed is not None else None
    set_global_seeds(rank_seed)

    # Prepare params.
    params = config.DEFAULT_PARAMS
    env_name = env.spec.id
    params['env_name'] = env_name
    if env_name in config.DEFAULT_ENV_PARAMS:
        params.update(config.DEFAULT_ENV_PARAMS[env_name]
                      )  # merge env-specific parameters in
    params.update(
        **override_params)  # makes it possible to override any parameter
    params['rollout_batch_size'] = env.num_envs
    params['num_cpu'] = num_cpu
    params['env_type'] = env_type
    with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
        json.dump(params, f)
    params = config.prepare_ve_params(params)

    dims = config.configure_dims(params)
    policy, value_ensemble, sample_disagreement_goals_fun, sample_uniform_goals_fun = \
        config.configure_ve_ddpg(dims=dims, params=params, clip_return=clip_return, policy_pkl=policy_pkl)

    if policy_pkl is not None:
        env.set_sample_goals_fun(sample_dummy_goals_fun)
    else:
        env.envs_op("update_goal_sampler",
                    goal_sampler=sample_disagreement_goals_fun)
        eval_env.envs_op("update_goal_sampler",
                         goal_sampler=sample_uniform_goals_fun)
        if plotter_env is not None:
            plotter_env.envs_op("update_goal_sampler",
                                goal_sampler=sample_uniform_goals_fun)

    if load_path is not None:
        tf_util.load_variables(
            os.path.join(load_path, 'final_policy_params.joblib'))
        return play(env=env, policy=policy)

    rollout_params, eval_params, plotter_params = config.configure_rollout_worker_params(
        params)

    rollout_worker = RolloutWorker(env,
                                   policy,
                                   dims,
                                   logger,
                                   monitor=True,
                                   **rollout_params)

    n_cycles = params['n_cycles']
    n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size
    params['n_epochs'] = n_epochs
    params[
        'total_timesteps'] = n_epochs * n_cycles * rollout_worker.T * rollout_worker.rollout_batch_size

    config.log_params(params, logger=logger)

    if policy_pkl is not None:
        train_fun = train_ve
        evaluator = None
    else:
        train_fun = train
        # construct evaluator
        # assert eval_env.sample_goals_fun is None
        # eval_env.set_sample_goals_fun(sample_dummy_goals_fun)
        evaluator = RolloutWorker(eval_env, policy, dims, logger,
                                  **eval_params)
        if plotter_env is not None:
            raise NotImplementedError
            # from baselines.misc.html_report import HTMLReport
            # plotter_worker = RolloutWorker(plotter_env, policy, dims, logger, **plotter_params)
            # rank = MPI.COMM_WORLD.Get_rank()
            # report = HTMLReport(os.path.join(save_path, f'report-{rank}.html'), images_per_row=8)
            #
            # # report.add_header("{}".format(EXPERIMENT_TYPE))
            # # report.add_text(format_dict(v))
            # plotter = config.configure_plotter(policy, value_ensemble, plotter_worker, params, report)
        else:
            plotter = None

    return train_fun(save_path=save_path,
                     policy=policy,
                     value_ensemble=value_ensemble,
                     rollout_worker=rollout_worker,
                     evaluator=evaluator,
                     n_epochs=n_epochs,
                     n_test_rollouts=params['n_test_rollouts'],
                     n_cycles=params['n_cycles'],
                     n_batches=params['n_batches'],
                     ve_n_batches=params['ve_n_batches'],
                     save_interval=save_interval,
                     plotter=plotter)
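
The n_epochs arithmetic above simply divides the timestep budget by the steps consumed per epoch (cycles x episode horizon x parallel rollouts), then rounds total_timesteps back down to a whole number of epochs. A worked example with assumed values:

# Hypothetical values, chosen only to illustrate the integer division above.
total_timesteps = 1_000_000
n_cycles = 50             # params['n_cycles']
T = 50                    # rollout_worker.T (episode horizon)
rollout_batch_size = 2    # rollout_worker.rollout_batch_size

n_epochs = total_timesteps // n_cycles // T // rollout_batch_size
assert n_epochs == 200
assert n_epochs * n_cycles * T * rollout_batch_size == 1_000_000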
Example #6
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          # update the model every `train_freq` steps
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          # how many transitions to collect before learning starts
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of training, set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> bool
        function called at every step with the state of the algorithm.
        If the callback returns true, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    print("deepq.py network parameter", network)
    print("deepq.py network_kwargs parameter", network_kwargs)
    # q_func produces a score (Q-value) for each action
    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        # the input observation_space is represented as batch * obs.shape * one-hot_dim
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
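
This variant stays close to stock baselines.deepq.learn, so the usual CartPole-style invocation applies; the environment and hyperparameters below are illustrative, not prescribed by this code:

import gym

env = gym.make("CartPole-v0")
act = learn(env,
            network='mlp',
            lr=1e-3,
            total_timesteps=100000,
            buffer_size=50000,
            exploration_fraction=0.1,
            exploration_final_eps=0.02,
            print_freq=10)
act.save("cartpole_model.pkl")   # ActWrapper.save
env.close()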
Example #7
def learn(*, network, env, total_timesteps,
    seed=None,
    eval_env=None,
    replay_strategy='future',
    policy_save_interval=5,
    clip_return=True,
    demo_file=None,
    override_params=None,
    load_path=None,
    save_path=None,
    **kwargs
):

    override_params = override_params or {}
    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
        num_cpu = MPI.COMM_WORLD.Get_size()
    else:
        rank = 0
        num_cpu = 1

    # Seed everything.
    rank_seed = seed + 1000000 * rank if seed is not None else None
    set_global_seeds(rank_seed)

    # Prepare params.
    params = config.DEFAULT_PARAMS
    env_name = env.spec.id
    params['env_name'] = env_name
    params['replay_strategy'] = replay_strategy
    if env_name in config.DEFAULT_ENV_PARAMS:
        params.update(config.DEFAULT_ENV_PARAMS[env_name])  # merge env-specific parameters in
    params.update(**override_params)  # makes it possible to override any parameter
    with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
        json.dump(params, f)
    params = config.prepare_params(params)
    params['rollout_batch_size'] = env.num_envs

    if demo_file is not None:
        params['bc_loss'] = 1
    params.update(kwargs)

    config.log_params(params, logger=logger)

    if num_cpu == 1:
        logger.warn()
        logger.warn('*** Warning ***')
        logger.warn(
            'You are running HER with just a single MPI worker. This will work, but the ' +
            'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) ' +
            'were obtained with --num_cpu 19. This makes a significant difference and if you ' +
            'are looking to reproduce those results, be aware of this. Please also refer to ' +
            'https://github.com/openai/baselines/issues/314 for further details.')
        logger.warn('****************')
        logger.warn()

    dims = config.configure_dims(params)
    policy = config.configure_ddpg(dims=dims, params=params, clip_return=clip_return)
    if load_path is not None:
        tf_util.load_variables(load_path)

    rollout_params = {
        'exploit': False,
        'use_target_net': False,
        'use_demo_states': True,
        'compute_Q': False,
        'T': params['T'],
    }

    eval_params = {
        'exploit': True,
        'use_target_net': params['test_with_polyak'],
        'use_demo_states': False,
        'compute_Q': True,
        'T': params['T'],
    }

    for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:
        rollout_params[name] = params[name]
        eval_params[name] = params[name]

    eval_env = eval_env or env

    rollout_worker = RolloutWorker(env, policy, dims, logger, monitor=True, **rollout_params)
    evaluator = RolloutWorker(eval_env, policy, dims, logger, **eval_params)

    n_cycles = params['n_cycles']
    n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size

    return train(
        save_path=save_path, policy=policy, rollout_worker=rollout_worker,
        evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'],
        n_cycles=params['n_cycles'], n_batches=params['n_batches'],
        policy_save_interval=policy_save_interval, demo_file=demo_file)
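
A hedged sketch of calling this HER learn(): it expects a goal-based vectorized environment exposing spec.id and num_envs (for example one built with baselines.common.cmd_util.make_vec_env around a Fetch task); the names and step counts below are assumptions for illustration:

# venv is assumed to be a vectorized goal-based env, e.g. FetchReach-v1
# wrapped by make_vec_env; build it before calling learn.
policy = learn(network='mlp',
               env=venv,
               total_timesteps=50000,
               seed=0,
               replay_strategy='future',
               policy_save_interval=5,
               save_path='./her_policy')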
Example #8
 def load(self, path):
     load_variables(path)
Example #9
 def load(self, load_path, extra_vars=None):
     tf_util.load_variables(load_path,
                            extra_vars=extra_vars)  #, sess=self.sess
Example #10
	def evalModel(self, toteps=1, err_thresh=.1):

		# Timestep Data
		actions = []
		observations = []
		rewards = []
		errors = []
		desireds = []
		actuals = []

		# Episode Data
		episodes = []
		ep_num = []

		# Set up parameters for quick initialization
		tp = tg.trainParams()
		tp.num_timesteps = 1
		tp.timesteps_per_actorbatch = 1000
		tp.optim_epochs = 1
		tp.optim_batchsize = 1
		tp.seed = 17
		
		pp = tg.policyParams()

		# Initialize and load model
		if tp.model_path: tp.model_path = None # prevent override of model
		with U.tf.Graph().as_default():		# Allow Re-running of tf
			pi = tg.train(tp, pp, self.env_id)

		# Load Model
		tp.modelName(self.model_name) # Set up name
		self.model_dir = tp.model_dir # Save extracted model dir
		self.model_path = tp.model_path # Save extracted model path
		U.load_variables(tp.model_path)

		# Make Training Log
	#	self.train_log = TrainLog(tp.model_dir)
		
		# Setup gym
		env = gym.make(self.env_id)
		# Seed Set
		rank = MPI.COMM_WORLD.Get_rank()
		workerseed = tp.seed + 1000000 * rank
		env.seed(workerseed)

		env.reset()
		ob = env.reset()  # reset object for pi

		print('----------=================--------------')
		print('rank: ', rank, 'workerseed: ', workerseed)
		print('----------=================--------------')

		#env.render()
		#input('Press enter to continue')

		for eps in range (toteps):
			print(eps)	
			#action = rand_action(env)
			action = pi.act(stochastic=False, ob=ob)[0]
			ob, r, done, info = env.step(action)

			# Initialize records
			ittr = 0
			ep_data={}
			headers = ['stats', 'actions', 'observations', 'rewards', 
				'roll_err', 'pitch_err', 'yaw_err',
				'droll_v', 'dpitch_v', 'dyaw_v',
				'aroll_v', 'apitch_v', 'ayaw_v']
			for header in headers: ep_data[header] = []

			# Run Environment
			while not done:
				#action = rand_action(env)
#				action = env.action_space.sample() # Random action for ctrl
				action = pi.act(stochastic=False, ob=ob)[0]  # choose action	
				ob, r, done, info = env.step(action)  # perform action
				des = env.omega_target  # desired angular velocities
				actual = env.omega_actual  # current angular velocities 

				# Record Data
				ep_data['actions'].append(action)
				ep_data['observations'].append(ob)
				ep_data['rewards'].append(r)

				# Errors			
				ep_data['roll_err'].append(abs(ob[0]))
				ep_data['pitch_err'].append(abs(ob[1]))
				ep_data['yaw_err'].append(abs(ob[2]))

				# Step functions
				ep_data['droll_v'].append(env.omega_target[0])
				ep_data['dpitch_v'].append(env.omega_target[1])
				ep_data['dyaw_v'].append(env.omega_target[2])
				ep_data['aroll_v'].append(env.omega_actual[0])
				ep_data['apitch_v'].append(env.omega_actual[1])
				ep_data['ayaw_v'].append(env.omega_actual[2])

				ittr += 1


			episodes.append(ep_data) 

			env.reset()
			ep_num.append(eps)
		env.close()
		self.eps = episodes

		self.procEval()
Example #11
import random
if not ITS_THE_REAL_DEAL:
    benchmark.benchmark("imports")

with open("model_params.pkl", "rb") as f:
    learn_params = pkl.load(f)
    env, policy, nenvs, ob_space, ac_space, nstack, model = create_model(
        **learn_params)

params = {}
with open("params.json") as f:
    params = json.load(f)

if not ITS_THE_REAL_DEAL: benchmark.benchmark("create model")

load_variables("actor.ckpt")

if not ITS_THE_REAL_DEAL: benchmark.benchmark("load weights")

devnull.close()
sys.stdout = oldstdout

from replay_parser import gen_obs
""" <<<Game Begin>>> """

# This game object contains the initial game state.
game = hlt.Game()
# At this point "game" variable is populated with initial map data.
# This is a good place to do computationally expensive start-up pre-processing.
# As soon as you call "ready" function below, the 2 second per turn timer will start.
game.ready("MyPythonBot")
Example #12
def learn(env,
          policy_func,
          reward_giver,
          expert_dataset,
          rank,
          pretrained,
          pretrained_weight,
          *,
          g_step,
          d_step,
          entcoeff,
          save_per_iter,
          ckpt_dir,
          timesteps_per_batch,
          task_name,
          gamma,
          lam,
          max_kl,
          cg_iters,
          cg_damping=1e-2,
          vf_stepsize=3e-4,
          d_stepsize=3e-4,
          vf_iters=3,
          max_timesteps=0,
          max_episodes=0,
          max_iters=0,
          callback=None):

    nworkers = MPI.COMM_WORLD.Get_size()
    rank = MPI.COMM_WORLD.Get_rank()
    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space
    pi = policy_func("pi",
                     ob_space,
                     ac_space,
                     reuse=(pretrained_weight != None))
    oldpi = policy_func("oldpi", ob_space, ac_space)
    atarg = tf.placeholder(
        dtype=tf.float32,
        shape=[None])  # Target advantage function (if applicable)
    ret = tf.placeholder(dtype=tf.float32, shape=[None])  # Empirical return

    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])

    kloldnew = oldpi.pd.kl(pi.pd)
    ent = pi.pd.entropy()
    meankl = tf.reduce_mean(kloldnew)
    meanent = tf.reduce_mean(ent)
    entbonus = entcoeff * meanent

    vferr = tf.reduce_mean(tf.square(pi.vpred - ret))

    ratio = tf.exp(pi.pd.logp(ac) -
                   oldpi.pd.logp(ac))  # advantage * pnew / pold
    surrgain = tf.reduce_mean(ratio * atarg)

    optimgain = surrgain + entbonus
    losses = [optimgain, meankl, entbonus, surrgain, meanent]
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]

    dist = meankl

    all_var_list = pi.get_trainable_variables()
    var_list = [
        v for v in all_var_list
        if v.name.startswith("pi/pol") or v.name.startswith("pi/logstd")
    ]
    vf_var_list = [v for v in all_var_list if v.name.startswith("pi/vff")]
    assert len(var_list) == len(vf_var_list) + 1
    d_adam = MpiAdam(reward_giver.get_trainable_variables())
    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(var_list)
    set_from_flat = U.SetFromFlat(var_list)
    klgrads = tf.gradients(dist, var_list)
    flat_tangent = tf.placeholder(dtype=tf.float32,
                                  shape=[None],
                                  name="flat_tan")
    shapes = [var.get_shape().as_list() for var in var_list]
    start = 0
    tangents = []
    for shape in shapes:
        sz = U.intprod(shape)
        tangents.append(tf.reshape(flat_tangent[start:start + sz], shape))
        start += sz
    gvp = tf.add_n([
        tf.reduce_sum(g * tangent)
        for (g, tangent) in zipsame(klgrads, tangents)
    ])  # pylint: disable=E1111
    fvp = U.flatgrad(gvp, var_list)

    assign_old_eq_new = U.function(
        [], [],
        updates=[
            tf.assign(oldv, newv)
            for (oldv,
                 newv) in zipsame(oldpi.get_variables(), pi.get_variables())
        ])
    compute_losses = U.function([ob, ac, atarg], losses)
    compute_lossandgrad = U.function([ob, ac, atarg], losses +
                                     [U.flatgrad(optimgain, var_list)])
    compute_fvp = U.function([flat_tangent, ob, ac, atarg], fvp)
    compute_vflossandgrad = U.function([ob, ret],
                                       U.flatgrad(vferr, vf_var_list))

    def allmean(x):
        assert isinstance(x, np.ndarray)
        out = np.empty_like(x)
        MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
        out /= nworkers
        return out

    U.initialize()
    th_init = get_flat()
    MPI.COMM_WORLD.Bcast(th_init, root=0)
    set_from_flat(th_init)
    d_adam.sync()
    vfadam.sync()
    if rank == 0:
        print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi,
                                     env,
                                     reward_giver,
                                     timesteps_per_batch,
                                     stochastic=True)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
    true_rewbuffer = deque(maxlen=40)
    test_true_rewbuffer = deque(maxlen=40)
    test_lenbuffer = deque(maxlen=40)

    assert sum([max_iters > 0, max_timesteps > 0, max_episodes > 0]) == 1

    # g_loss_stats = stats(loss_names)
    # d_loss_stats = stats(reward_giver.loss_name)
    ep_stats = stats(["True_rewards", "Rewards", "Episode_length"])
    # if provide pretrained weight
    if pretrained_weight is not None:
        U.load_variables(pretrained_weight, variables=pi.get_variables())

    best = -2000
    save_ind = 0
    max_save = 3
    while True:
        if callback: callback(locals(), globals())
        # if max_timesteps and timesteps_so_far >= max_timesteps:
        #     break
        # elif max_episodes and episodes_so_far >= max_episodes:
        #     break
        # elif max_iters and iters_so_far >= max_iters:
        #     break
        #
        if max_iters and iters_so_far >= max_iters:
            break

        logger.log("********** Iteration %i ************" % iters_so_far)

        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs)) + cg_damping * p

        # ------------------ Update G ------------------
        logger.log("Optimizing Policy...")
        for _ in range(g_step):
            seg = seg_gen.__next__()

            if (iters_so_far == 0) or ((iters_so_far + 1) % 60 == 0):
                obs_list = []
                acs_list = []
                len_list = []
                ret_list = []
                for _ in tqdm(range(50)):
                    from run_expert import traj_1_generator
                    traj = traj_1_generator(pi, env, 1000, stochastic=False)
                    obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj[
                        'ep_len'], traj['ep_ret']
                    obs_list.append(obs)
                    acs_list.append(acs)
                    len_list.append(ep_len)
                    ret_list.append(ep_ret)

                avg_len = np.mean(len_list)
                avg_ret = np.mean(ret_list)
                std_ret = np.std(ret_list)
                logger.record_tabular("TestEpTrueRewMean", avg_ret)
                logger.record_tabular("TestEpTrueRewStd", std_ret)

            else:
                logger.record_tabular("TestEpTrueRewMean", -1)
                logger.record_tabular("TestEpTrueRewStd", -1)

            # report stats and save the policy if it is any good
            lrlocal = (seg["ep_lens"], seg["ep_rets"], seg["ep_true_rets"])  # local values
            listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
            lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))
            true_rewbuffer.extend(true_rets)
            lenbuffer.extend(lens)
            rewbuffer.extend(rews)

            true_rew_avg = np.mean(true_rewbuffer)
            true_rew_std = np.std(true_rewbuffer)
            logger.record_tabular("EpLenMean", np.mean(lenbuffer))
            logger.record_tabular("EpRewMean", np.mean(rewbuffer))
            logger.record_tabular("EpRewStd", np.std(rewbuffer))
            logger.record_tabular("EpTrueRewMean", true_rew_avg)
            logger.record_tabular("EpTrueStd", true_rew_std)
            logger.record_tabular("EpThisIter", len(lens))
            episodes_so_far += len(lens)
            timesteps_so_far += sum(lens)
            iters_so_far += 1

            logger.record_tabular("EpisodesSoFar", episodes_so_far)
            logger.record_tabular("TimestepsSoFar", timesteps_so_far)
            logger.record_tabular("TimeElapsed", time.time() - tstart)
            logger.record_tabular("Best so far", best)

            # # Save model
            # if ckpt_dir is not None and true_rew_avg >= best and len(true_rewbuffer) > 30:
            #     best = true_rew_avg
            #     fname = os.path.join(ckpt_dir, task_name)
            #     os.makedirs(os.path.dirname(fname), exist_ok=True)
            #     pi.save_policy(fname)

            # Save model
            if ckpt_dir is not None:
                fname = os.path.join(ckpt_dir, task_name)
                os.makedirs(os.path.dirname(fname), exist_ok=True)
                if true_rew_avg >= best:
                    best = true_rew_avg
                    pi.save_policy(fname + "_" + str(save_ind))
                    pi.save_policy(fname + "_best")
                save_ind = (save_ind + 1) % max_save
                if (iters_so_far + 1) % 1000 == 0:
                    pi.save_policy(fname + "_iter_" + str(iters_so_far + 1))

            # compute the gradient towards the next policy
            add_vtarg_and_adv(seg, gamma, lam)
            # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
            ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg["tdlamret"]
            vpredbefore = seg["vpred"]  # predicted value function before update
            atarg = (atarg - atarg.mean()) / atarg.std()  # standardized advantage function estimate

            if hasattr(pi, "ob_rms"):
                pi.ob_rms.update(ob)  # update running mean/std for policy

            args = seg["ob"], seg["ac"], atarg
            fvpargs = [arr[::5] for arr in args]

            assign_old_eq_new()  # set old parameter values to new parameter values

            *lossbefore, g = compute_lossandgrad(*args)
            lossbefore = allmean(np.array(lossbefore))
            g = allmean(g)
            if np.allclose(g, 0):
                logger.log("Got zero gradient. not updating")
            else:
                stepdir = cg(fisher_vector_product,
                             g,
                             cg_iters=cg_iters,
                             verbose=False)
                assert np.isfinite(stepdir).all()
                shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
                lm = np.sqrt(shs / max_kl)
                fullstep = stepdir / lm
                expectedimprove = g.dot(fullstep)
                surrbefore = lossbefore[0]
                stepsize = 1.0
                thbefore = get_flat()
                for _ in range(10):
                    thnew = thbefore + fullstep * stepsize
                    set_from_flat(thnew)
                    meanlosses = surr, kl, *_ = allmean(
                        np.array(compute_losses(*args)))
                    improve = surr - surrbefore
                    logger.log("Expected: %.3f Actual: %.3f" %
                               (expectedimprove, improve))
                    if not np.isfinite(meanlosses).all():
                        logger.log("Got non-finite value of losses -- bad!")
                    elif kl > max_kl * 1.5:
                        logger.log("violated KL constraint. shrinking step.")
                    elif improve < 0:
                        logger.log("surrogate didn't improve. shrinking step.")
                    else:
                        logger.log("Stepsize OK!")
                        break
                    stepsize *= .5
                else:
                    logger.log("couldn't compute a good step")
                    set_from_flat(thbefore)
                if nworkers > 1 and iters_so_far % 20 == 0:
                    paramsums = MPI.COMM_WORLD.allgather(
                        (thnew.sum(),
                         vfadam.getflat().sum()))  # list of tuples
                    assert all(
                        np.allclose(ps, paramsums[0]) for ps in paramsums[1:])
            for _ in range(vf_iters):
                for (mbob, mbret) in dataset.iterbatches(
                    (seg["ob"], seg["tdlamret"]),
                        include_final_partial_batch=False,
                        batch_size=128):
                    if hasattr(pi, "ob_rms"):
                        pi.ob_rms.update(
                            mbob)  # update running mean/std for policy
                    g = allmean(compute_vflossandgrad(mbob, mbret))
                    vfadam.update(g, vf_stepsize)

            if rank == 0:
                logger.dump_tabular()

        g_losses = meanlosses
        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)
        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))

        if ckpt_dir is not None:
            pi.save_policy(fname + "_converged")
        # ------------------ Update D ------------------
        batch_size = len(ob) // d_step
        d_losses = []  # list of tuples, each of which gives the loss for a minibatch
        for ob_batch, ac_batch in dataset.iterbatches(
            (ob, ac), include_final_partial_batch=False,
                batch_size=batch_size):
            ob_expert, ac_expert = expert_dataset.next_batch(len(ob_batch))
            # update running mean/std for reward_giver
            if hasattr(reward_giver, "obs_rms"):
                reward_giver.obs_rms.update(
                    np.concatenate((ob_batch, ob_expert), 0))
            *newlosses, g = reward_giver.lossandgrad(ob_batch, ac_batch,
                                                     ob_expert, ac_expert)
            d_adam.update(allmean(g), d_stepsize)
            d_losses.append(newlosses)
        logger.log(fmt_row(13, reward_giver.loss_name))
        logger.log(fmt_row(13, np.mean(d_losses, axis=0)))
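
A minimal, self-contained sketch of the conjugate-gradient solve that the policy update above relies on: it solves F·x = g using only Fisher-vector products (the `fisher_vector_product` closure), and the resulting direction is then rescaled and backtracked against the KL constraint. The code below is illustrative, not the baselines.common.cg implementation:

import numpy as np

def conjugate_gradient(f_Ax, b, iters=10, residual_tol=1e-10):
    """Solve A x = b where A is only available through the matrix-vector product f_Ax."""
    x = np.zeros_like(b)
    r = b.copy()   # residual b - A x (x starts at zero)
    p = r.copy()   # search direction
    rdotr = r.dot(r)
    for _ in range(iters):
        Ap = f_Ax(p)
        alpha = rdotr / p.dot(Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rdotr = r.dot(r)
        if new_rdotr < residual_tol:
            break
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
    return x

# Example with a dense SPD matrix standing in for the damped Fisher matrix:
A = np.array([[4.0, 1.0], [1.0, 3.0]])
g = np.array([1.0, 2.0])
stepdir = conjugate_gradient(lambda v: A.dot(v), g)
# stepdir is approximately np.linalg.solve(A, g)
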
Example #13
0
def learn(*,
          network,
          env,
          total_timesteps,
          seed=None,
          eval_env=None,
          replay_strategy='future',
          policy_save_interval=5,
          clip_return=True,
          demo_file=None,
          override_params=None,
          load_path=None,
          save_path=None,
          params=None,
          **kwargs):
    override_params = override_params or {}
    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
        num_cpu = MPI.COMM_WORLD.Get_size()
    else:
        rank = 0
        num_cpu = 1

    # Seed everything.
    rank_seed = seed + 1000000 * rank if seed is not None else None
    set_global_seeds(rank_seed)

    # Prepare params.
    params = {
        # env
        'max_u': 1.,  # max absolute value of actions on different coordinates
        # ddpg
        'layers': 3,  # number of layers in the critic/actor networks
        'hidden': 256,  # number of neurons in each hidden layer
        'network_class': 'baselines.her.actor_critic:ActorCritic',
        'Q_lr': 0.001,  # critic learning rate
        'pi_lr': 0.001,  # actor learning rate
        'buffer_size': int(1E6),  # for experience replay
        'polyak': 0.95,  # polyak averaging coefficient
        'action_l2': 1.0,  # quadratic penalty on actions (before rescaling by max_u)
        'clip_obs': 200.,
        'scope': 'ddpg',  # can be tweaked for testing
        'relative_goals': False,
        # training
        'n_cycles': 50,  # per epoch
        'rollout_batch_size': 2,  # per mpi thread
        'n_batches': 40,  # training batches per cycle
        'batch_size': 256,  # per mpi thread, measured in transitions and reduced to even multiple of chunk_length
        'n_test_rollouts': 10,  # number of test rollouts per epoch, each consists of rollout_batch_size rollouts
        'test_with_polyak': False,  # run test episodes with the target network
        # exploration
        'random_eps': 0.2,  # percentage of time a random action is taken
        'noise_eps': 0.3,  # std of gaussian noise added to not-completely-random actions as a percentage of max_u
        # HER
        'replay_strategy': 'future',  # supported modes: future, none
        'replay_k': 4,  # number of additional goals used for replay, only used if off_policy_data=future
        # normalization
        'norm_eps': 0.01,  # epsilon used for observation normalization
        'norm_clip': 5,  # normalized observations are clipped to this value
        'bc_loss': 0,  # whether or not to use the behavior cloning loss as an auxiliary loss
        'q_filter': 0,  # whether or not a Q value filter should be used on the actor outputs
        'num_demo': 25,  # number of expert demo episodes
        'demo_batch_size': 128,  # number of demonstration samples used per mpi thread (e.g. 128/1024 or 32/256)
        'prm_loss_weight': 0.001,  # weight of the primary loss
        'aux_loss_weight': 0.0078,  # weight of the auxiliary (behavior cloning) loss
        'perturb': kwargs['pert_type'],
        'n_actions': kwargs['n_actions'],
    }
    params['replay_strategy'] = replay_strategy
    if env is not None:
        env_name = env.spec.id
        params['env_name'] = env_name
        if env_name in config.DEFAULT_ENV_PARAMS:
            params.update(config.DEFAULT_ENV_PARAMS[env_name])  # merge env-specific parameters in
    else:
        params['env_name'] = 'NuFingers_Experiment'
    params.update(**override_params)  # makes it possible to override any parameter
    with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
        json.dump(params, f)

    if demo_file is not None:
        params['bc_loss'] = 1
        params['q_filter'] = 1
        params['n_cycles'] = 20
        params['random_eps'] = 0.1  # chip: ON
        params['noise_eps'] = 0.1  # chip: ON
        # params['batch_size']: 1024
    params = config.prepare_params(params)
    params['rollout_batch_size'] = 1
    params.update(kwargs)

    config.log_params(params, logger=logger)

    if num_cpu == 1:
        logger.warn()
        logger.warn('*** Warning ***')
        logger.warn(
            'You are running HER with just a single MPI worker. This will work, but the '
            'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) '
            'were obtained with --num_cpu 19. This makes a significant difference and if you '
            'are looking to reproduce those results, be aware of this. Please also refer to '
            'https://github.com/openai/baselines/issues/314 for further details.')
        logger.warn('****************')
        logger.warn()

    if env is not None:
        dims = config.configure_dims(params)
    else:
        dims = dict(o=15, u=4, g=7, info_is_success=1)
    policy = config.configure_ddpg(dims=dims,
                                   params=params,
                                   clip_return=clip_return)
    if load_path is not None:
        tf_util.load_variables(load_path)

    rollout_params = {
        'exploit': False,
        'use_target_net': False,
        'use_demo_states': True,
        'compute_Q': False,
        'T': params['T'],
    }

    eval_params = {
        'exploit': True,
        'use_target_net': params['test_with_polyak'],
        'use_demo_states': False,
        'compute_Q': True,
        'T': params['T'],
    }

    for name in [
            'T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps'
    ]:
        rollout_params[name] = params[name]
        eval_params[name] = params[name]

    eval_env = eval_env or env

    print("NAME={}".format(params['env_name']))

    print(rollout_params)

    if params['env_name'].find('NuFingers_Experiment') == -1:
        rollout_worker = RolloutWorker(env,
                                       policy,
                                       dims,
                                       logger,
                                       monitor=True,
                                       **rollout_params)
        evaluator = RolloutWorker(eval_env, policy, dims, logger,
                                  **eval_params)
    else:
        rollout_worker = RolloutNuFingers(policy,
                                          dims,
                                          logger,
                                          monitor=True,
                                          **rollout_params)
        evaluator = RolloutNuFingers(policy, dims, logger, **eval_params)

    n_cycles = params['n_cycles']
    n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size

    return train(save_path=save_path,
                 policy=policy,
                 rollout_worker=rollout_worker,
                 evaluator=evaluator,
                 n_epochs=n_epochs,
                 n_test_rollouts=params['n_test_rollouts'],
                 n_cycles=params['n_cycles'],
                 n_batches=params['n_batches'],
                 policy_save_interval=policy_save_interval,
                 demo_file=demo_file)
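
The 'replay_strategy': 'future' and 'replay_k' settings above refer to hindsight relabeling: when a transition is sampled for training, its desired goal is, with probability 1 - 1/(1 + replay_k), replaced by a goal actually achieved at or after that step in the same episode, and the reward is recomputed against the substituted goal. A rough, self-contained sketch of the idea follows; it is not the baselines.her sampler, and the array layout and reward function are assumptions:

import numpy as np

def her_future_relabel(achieved_goals, goals, t, replay_k=4, rng=np.random):
    """Return the goal to train transition t against: keep the original goal with
    probability 1/(1 + replay_k), otherwise substitute a goal achieved at the
    current or a later step of the same episode ('future' strategy)."""
    future_p = 1.0 - 1.0 / (1.0 + replay_k)
    if rng.uniform() < future_p:
        future_t = rng.randint(t, len(achieved_goals))  # the current or a later step
        return achieved_goals[future_t]
    return goals[t]

def sparse_reward(achieved, goal, eps=0.05):
    # 0 on success, -1 otherwise (Fetch-style sparse reward)
    return 0.0 if np.linalg.norm(achieved - goal) < eps else -1.0
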
Example #14
0
 def load(self, load_path):
     tf_util.load_variables(load_path, sess=self.sess)
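
For context, this load simply restores all graph variables from a checkpoint path written by the matching save call. A hedged usage sketch, assuming TF1-style baselines tf_util, an already-built graph and session, and placeholder paths:

# from baselines.common import tf_util
# tf_util.save_variables('/tmp/run1/model')               # during or after training
# tf_util.load_variables('/tmp/run1/model', sess=sess)    # later, into a session holding the same graph
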
Example #15
0
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          expert_lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.5,
          initial_exploration_p=1.0,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1,
          gamma=1.0,
          target_network_update_freq=100,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          double_q=True,
          obs_dim=None,
          **network_kwargs
            ):
    """Train a bootstrap-dqn model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    nenvs = env.num_envs
    print("Bootstrap DQN with {} envs".format(nenvs))

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph
    # import IPython; IPython.embed()
    #assert isinstance(env.envs[0].env.env.env, ExplicitBayesEnv)
    #belief_space = env.envs[0].env.env.env.belief_space
    #observation_space = env.envs[0].env.env.env.internal_observation_space

    obs_space = env.observation_space

    assert obs_dim is not None

    observation_space = Box(obs_space.low[:obs_dim], obs_space.high[:obs_dim], dtype=np.float32)
    belief_space = Box(obs_space.low[obs_dim:], obs_space.high[obs_dim:], dtype=np.float32)

    num_experts = belief_space.high.size

    print("Num experts", num_experts)

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    def make_bel_ph(name):
        return ObservationInput(belief_space, name=name)

    q_func = build_q_func(network, num_experts, **network_kwargs)

    print('=============== got qfunc ============== ')

    act, train, update_target, debug = residual_bqn_separate_expert.build_train(
        make_obs_ph=make_obs_ph,
        make_bel_ph=make_bel_ph,
        q_func=q_func,
        num_experts=num_experts,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        expert_optimizer=tf.train.AdamOptimizer(learning_rate=expert_lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        double_q=double_q
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size, num_experts)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=initial_exploration_p,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()

    episode_reward = np.zeros(nenvs, dtype=np.float32)
    saved_mean_reward = None
    reset = True
    epoch_episode_rewards = []
    epoch_episode_steps = []
    epoch_actions = []
    epoch_episodes = 0
    episode_rewards_history = deque(maxlen=100)
    episode_step = np.zeros(nenvs, dtype=int)
    episodes = 0  # scalar


    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        print("Model will be saved at " , model_file)
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))
            print('Loaded model from {}'.format(load_path))

        t = 0
        while t < total_timesteps:
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            update_eps = exploration.value(t)
            update_param_noise_threshold = 0.

            obs = env.reset()
            obs, bel = obs[:, :-belief_space.shape[0]], obs[:, -belief_space.shape[0]:]

            for m in range(100):
                action, q_values, expert_q_values = act(np.array(obs)[None], np.array(bel)[None], update_eps=update_eps, **kwargs)
                env_action = action

                new_obs, rew, done, info = env.step(env_action)
                new_obs, new_bel = new_obs[:, :-belief_space.shape[0]], new_obs[:, -belief_space.shape[0]:]

                expert = np.array([_info['expert'] for _info in info])
                # Store transition in the replay buffer.
                replay_buffer.add(obs, bel, action, rew, new_obs, new_bel, done, expert)


                if np.random.rand() < 0.01:
                    # write to file
                    with open('tiger_rbqn_sep_exp.csv', 'a') as f:
                        out = str(expert[0]) + ',' + ','.join(str(np.around(x,1)) for x in [bel[0], obs[0], q_values[0], expert_q_values[:, 0].ravel()])
                        f.write(out + "\n")
                    print(out)


                obs = new_obs
                bel = new_bel

                episode_reward += rew
                episode_step += 1


                for d in range(len(done)):
                    if done[d]:
                        epoch_episode_rewards.append(episode_reward[d])
                        episode_rewards_history.append(episode_reward[d])
                        epoch_episode_steps.append(episode_step[d])
                        episode_reward[d] = 0.
                        episode_step[d] = 0
                        epoch_episodes += 1
                        episodes += 1

            t += 100 * nenvs

            # import IPython; IPython.embed(); import sys; sys.exit(0)

            if t > learning_starts and t % train_freq == 0:
                # for _ in range(5):
                # expert_i = np.random.choice(num_experts)
                for expert_i in range(num_experts):
                    # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                    if prioritized_replay:
                        experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t), expert=expert_i)
                        if experience is None:
                            continue
                        obses_t, bels_t, actions, rewards, obses_tp1, bels_tp1, dones, weights, batch_idxes = experience
                    else:
                        experience = replay_buffer.sample(batch_size, expert=expert_i)
                        if experience is None:
                            continue
                        obses_t, bels_t, actions, rewards, obses_tp1, bels_tp1, dones, exps = experience
                        weights, batch_idxes = np.ones_like(rewards), None

                    assert np.all(exps == expert_i)
                    td_errors, expert_td_errors = train(obses_t, bels_t, actions, rewards, obses_tp1, bels_tp1, dones, weights, expert_i)


                    if np.random.rand() < 0.01:
                        print("TD error", td_errors, expert_td_errors)
                    if prioritized_replay:
                        new_priorities = np.abs(td_errors) + prioritized_replay_eps
                        replay_buffer.update_priorities(batch_idxes, new_priorities)
                """

                obses_t, bels_t, actions, rewards, obses_tp1, bels_tp1, dones, exps = replay_buffer.sample(batch_size, expert=None)
                weights, batch_idxes = np.ones_like(rewards), None

                td_errors = train(obses_t, bels_t, actions, rewards, obses_tp1, bels_tp1, dones, weights, np.array([0]))
                """

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards_history), 2)
            num_episodes = episodes

            if print_freq is not None:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()
                print("episodes   ", num_episodes, "steps {}/{}".format(t, total_timesteps))
                print("mean reward", mean_100ep_reward)
                print("% time spent exploring", int(100 * exploration.value(t)))

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    # if print_freq is not None:
                    print("Saving model due to mean reward increase: {} -> {}".format(
                               saved_mean_reward, mean_100ep_reward))
                    print("saving model")
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
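
The exploration schedule used above anneals epsilon linearly from initial_exploration_p down to exploration_final_eps over exploration_fraction * total_timesteps steps and then holds the final value. A minimal stand-in with the same behavior (illustrative, not the baselines LinearSchedule class):

def linear_epsilon(t, schedule_timesteps, initial_p=1.0, final_p=0.02):
    """Linearly anneal from initial_p to final_p over schedule_timesteps steps, then hold final_p."""
    frac = min(float(t) / schedule_timesteps, 1.0)
    return initial_p + frac * (final_p - initial_p)

# e.g. with exploration_fraction=0.5 and total_timesteps=100000 (schedule_timesteps=50000):
#   linear_epsilon(0, 50000)     -> 1.0
#   linear_epsilon(25000, 50000) -> 0.51
#   linear_epsilon(50000, 50000) -> 0.02
#   linear_epsilon(90000, 50000) -> 0.02
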
Example #16
0
def learn(env,
          network,
          seed=None,
          lr=1e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          multiplayer=False,
          callback=None,
          load_path=None,
          load_path_1=None,
          load_path_2=None,
          **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """

    # This was not handled in the most elegant way: variables get a _1 or _2 suffix to keep
    # the two players separate, and a number of if statements keep the _2 variables inactive
    # in single-player mode.

    # when in multiplayer Space Invaders, need to not reward players for other player dying
    isSpaceInvaders = False
    if "SpaceInvaders" in str(env):
        isSpaceInvaders = True

    # put a limit on the amount of memory used, otherwise TensorFlow will consume nearly everything
    # this leaves 1 GB free on my computer, others may need to change it

    # Create all the functions necessary to train the model
    # Create two separate TensorFlow sessions
    graph_1 = tf.Graph()
    sess_1 = tf.Session(graph=graph_1)
    if multiplayer:
        graph_2 = tf.Graph()
        sess_2 = tf.Session(graph=graph_2)
    else:
        # set session 2 to None if it's not being used
        sess_2 = None
    set_global_seeds(seed)
    # specify the q functions as separate objects
    q_func_1 = build_q_func(network, **network_kwargs)
    if multiplayer:
        q_func_2 = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    # build everything for the first model
    # pass in the session and the "_1" suffix
    act_1, train_1, update_target_1, debug_1 = deepq.build_train(
        sess=sess_1,
        make_obs_ph=make_obs_ph,
        q_func=q_func_1,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise,
        scope="deepq")
    # a lot of if multiplayer statements duplicating these actions for a second network
    # pass in session 2 and "_2" instead
    if multiplayer:
        act_2, train_2, update_target_2, debug_2 = deepq.build_train(
            sess=sess_2,
            make_obs_ph=make_obs_ph,
            q_func=q_func_2,
            num_actions=env.action_space.n,
            optimizer=tf.train.AdamOptimizer(learning_rate=lr),
            gamma=gamma,
            grad_norm_clipping=10,
            param_noise=param_noise,
            scope="deepq")

    # separate act_params for each wrapper
    act_params_1 = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func_1,
        'num_actions': env.action_space.n,
    }
    if multiplayer:
        act_params_2 = {
            'make_obs_ph': make_obs_ph,
            'q_func': q_func_2,
            'num_actions': env.action_space.n,
        }
    # make the act wrappers
    act_1 = ActWrapper(act_1, act_params_1)
    if multiplayer:
        act_2 = ActWrapper(act_2, act_params_2)
    # I need to return something if it's single-player
    else:
        act_2 = None

    # Create the replay buffer
    # separate replay buffers are required for each network
    # this is required for competitive because the replay buffers hold rewards
    # and player 2 has different rewards than player 1
    if prioritized_replay:
        replay_buffer_1 = PrioritizedReplayBuffer(
            buffer_size, alpha=prioritized_replay_alpha)
        if multiplayer:
            replay_buffer_2 = PrioritizedReplayBuffer(
                buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer_1 = ReplayBuffer(buffer_size)
        if multiplayer:
            replay_buffer_2 = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    # initialize both sessions
    U.initialize(sess_1)
    if multiplayer:
        U.initialize(sess_2)
    # the session was passed into these functions when they were created
    # the separate update functions work within the different sessions
    update_target_1()
    if multiplayer:
        update_target_2()

    # keep track of rewards for both models separately
    episode_rewards_1 = [0.0]
    saved_mean_reward_1 = None
    if multiplayer:
        episode_rewards_2 = [0.0]
        saved_mean_reward_2 = None
    obs = env.reset()
    reset = True

    # storing stuff in a temporary directory while it's working
    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td
        model_file_1 = os.path.join(td, "model_1")
        temp_file_1 = os.path.join(td, "temp_1")
        model_saved_1 = False
        if multiplayer:
            model_file_2 = os.path.join(td, "model_2")
            temp_file_2 = os.path.join(td, "temp_2")
            model_saved_2 = False

        if tf.train.latest_checkpoint(td) is not None:
            if multiplayer:
                # load both models if multiplayer is on
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(model_file_1))
                model_saved_1 = True
                load_variables(model_file_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(model_file_2))
                model_saved_2 = True
            # otherwise just load the first one
            else:
                load_variables(model_file_1, sess_1)
                logger.log('Loaded model from {}'.format(model_file_1))
                model_saved_1 = True
        # I have separate load variables for single-player and multiplayer
        # this should be None if multiplayer is on
        elif load_path is not None:
            load_variables(load_path, sess_1)
            logger.log('Loaded model from {}'.format(load_path))
        # load the separate models in for multiplayer
        # should load the variables into the appropriate sessions

        # my format may restrict things to working properly only when a Player 1 model is loaded into session 1, and same for Player 2
        # however, in practice, the models won't work properly otherwise
        elif multiplayer:
            if load_path_1 is not None:
                load_variables(load_path_1, sess_1)
                logger.log('Loaded model 1 from {}'.format(load_path_1))
            if load_path_2 is not None:
                load_variables(load_path_2, sess_2)
                logger.log('Loaded model 2 from {}'.format(load_path_2))

        # actual training starts here
        for t in range(total_timesteps):
            # use this for updating purposes
            actual_t = t + 1
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            # receive model 1's action based on the model and observation
            action_1 = act_1(np.array(obs)[None],
                             update_eps=update_eps,
                             **kwargs)[0]
            env_action_1 = action_1
            # do the same for model 2 if in multiplayer
            if multiplayer:
                action_2 = act_2(np.array(obs)[None],
                                 update_eps=update_eps,
                                 **kwargs)[0]
                env_action_2 = action_2
            reset = False
            # apply actions to the environment
            if multiplayer:
                new_obs, rew_1, rew_2, done, _ = env.step(
                    env_action_1, env_action_2)
            # apply single action if there isn't a second model
            else:
                new_obs, rew_1, rew_2, done, _ = env.step(env_action_1)

            # manual clipping for Space Invaders multiplayer
            if isSpaceInvaders and multiplayer:
                # don't reward a player when the other player dies
                # change the reward to 0
                # the only time either player will get rewarded 200 is when the other player dies
                if rew_1 >= 200:
                    rew_1 = rew_1 - 200.0
                if rew_2 >= 200:
                    rew_2 = rew_2 - 200.0
                # manually clip the rewards using the sign function
                rew_1 = np.sign(rew_1)
                rew_2 = np.sign(rew_2)
                combo_factor = 0.25
                rew_1_combo = rew_1 + combo_factor * rew_2
                rew_2_combo = rew_2 + combo_factor * rew_1
                rew_1 = rew_1_combo
                rew_2 = rew_2_combo

            # Store transition in the replay buffers
            replay_buffer_1.add(obs, action_1, rew_1, new_obs, float(done))
            if multiplayer:
                # pass reward_2 to the second player
                # this reward will vary based on the game
                replay_buffer_2.add(obs, action_2, rew_2, new_obs, float(done))
            obs = new_obs
            # separate rewards for each model
            episode_rewards_1[-1] += rew_1
            if multiplayer:
                episode_rewards_2[-1] += rew_2
            if done:
                obs = env.reset()
                episode_rewards_1.append(0.0)
                if multiplayer:
                    episode_rewards_2.append(0.0)
                reset = True
            if actual_t > learning_starts and actual_t % train_freq == 0:

                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                # sample from the two replay buffers
                if prioritized_replay:
                    experience_1 = replay_buffer_1.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1,
                     weights_1, batch_idxes_1) = experience_1
                    # keep all the variables with separate names
                    if multiplayer:
                        experience_2 = replay_buffer_2.sample(
                            batch_size, beta=beta_schedule.value(t))
                        (obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2,
                         weights_2, batch_idxes_2) = experience_2
                # do the same if there's no prioritization
                else:
                    obses_t_1, actions_1, rewards_1, obses_tp1_1, dones_1 = replay_buffer_1.sample(
                        batch_size)
                    weights_1, batch_idxes_1 = np.ones_like(rewards_1), None
                    if multiplayer:
                        obses_t_2, actions_2, rewards_2, obses_tp1_2, dones_2 = replay_buffer_2.sample(
                            batch_size)
                        weights_2, batch_idxes_2 = np.ones_like(
                            rewards_2), None
                # actually train the model based on the samples
                td_errors_1 = train_1(obses_t_1, actions_1, rewards_1,
                                      obses_tp1_1, dones_1, weights_1)
                if multiplayer:
                    td_errors_2 = train_2(obses_t_2, actions_2, rewards_2,
                                          obses_tp1_2, dones_2, weights_2)
                # give new priority weights to the observations
                if prioritized_replay:
                    new_priorities_1 = np.abs(
                        td_errors_1) + prioritized_replay_eps
                    replay_buffer_1.update_priorities(batch_idxes_1,
                                                      new_priorities_1)
                    if multiplayer:
                        new_priorities_2 = np.abs(
                            td_errors_2) + prioritized_replay_eps
                        replay_buffer_2.update_priorities(
                            batch_idxes_2, new_priorities_2)

            if actual_t > learning_starts and actual_t % target_network_update_freq == 0:
                # Update target networks periodically.
                update_target_1()
                if multiplayer:
                    update_target_2()

            # this section is for the purposes of logging stuff
            # calculate the average reward over the last 100 episodes
            mean_100ep_reward_1 = round(np.mean(episode_rewards_1[-101:-1]), 1)
            if multiplayer:
                mean_100ep_reward_2 = round(
                    np.mean(episode_rewards_2[-101:-1]), 1)
            num_episodes = len(episode_rewards_1)
            # every given number of episodes log and print out the appropriate stuff
            if done and print_freq is not None and len(
                    episode_rewards_1) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                # print out both rewards if multiplayer
                if multiplayer:
                    logger.record_tabular("mean 100 episode reward 1",
                                          mean_100ep_reward_1)
                    logger.record_tabular("mean 100 episode reward 2",
                                          mean_100ep_reward_2)
                else:
                    logger.record_tabular("mean 100 episode reward",
                                          mean_100ep_reward_1)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            # save best-performing version of each model
            # I've opted out of this for competitive multiplayer because it's difficult to determine what's "best"

            if (checkpoint_freq is not None and actual_t > learning_starts
                    and num_episodes > 100
                    and actual_t % checkpoint_freq == 0):
                # if there's a best reward, save it as the new best model
                if saved_mean_reward_1 is None or mean_100ep_reward_1 > saved_mean_reward_1:
                    if print_freq is not None:
                        if multiplayer:
                            logger.log(
                                "Saving model 1 due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                        else:
                            logger.log(
                                "Saving model due to mean reward increase: {} -> {}"
                                .format(saved_mean_reward_1,
                                        mean_100ep_reward_1))
                    save_variables(model_file_1, sess_1)
                    model_saved_1 = True
                    saved_mean_reward_1 = mean_100ep_reward_1

                if multiplayer and (saved_mean_reward_2 is None or
                                    mean_100ep_reward_2 > saved_mean_reward_2):
                    if print_freq is not None:
                        logger.log(
                            "Saving model 2 due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward_2, mean_100ep_reward_2))
                    save_variables(model_file_2, sess_2)
                    model_saved_2 = True
                    saved_mean_reward_2 = mean_100ep_reward_2

        # restore models at the end to the best performers
        if model_saved_1:
            if print_freq is not None:
                logger.log("Restored model 1 with mean reward: {}".format(
                    saved_mean_reward_1))
            load_variables(model_file_1, sess_1)
        if multiplayer and model_saved_2:
            if print_freq is not None:
                logger.log("Restored model 2 with mean reward: {}".format(
                    saved_mean_reward_2))
            load_variables(model_file_2, sess_2)
    return act_1, act_2, sess_1, sess_2
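
The Space Invaders-specific reward handling in the example above (drop the +200 bonus a player receives when the other player dies, sign-clip each reward, then add a small share of the partner's clipped reward) can be read as a small pure function. A sketch under the same assumptions stated in the code's comments:

import numpy as np

def shape_two_player_rewards(rew_1, rew_2, combo_factor=0.25):
    """Remove the 200-point 'other player died' bonus, clip rewards to {-1, 0, 1},
    then give each player a small share of the other's clipped reward."""
    if rew_1 >= 200:
        rew_1 -= 200.0
    if rew_2 >= 200:
        rew_2 -= 200.0
    rew_1, rew_2 = np.sign(rew_1), np.sign(rew_2)
    return rew_1 + combo_factor * rew_2, rew_2 + combo_factor * rew_1

# e.g. shape_two_player_rewards(230, 10) -> (1.25, 1.25)
#      shape_two_player_rewards(0, 5)    -> (0.25, 1.0)
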
Example #17
0
def learn(  # env flags
        env,
        raw_env,
        use_2D_env=True,
        use_multiple_starts=False,
        use_rich_reward=False,
        total_timesteps=100000,
        # dqn
        network=identity_fn,
        exploration_fraction=0.1,
        exploration_final_eps=0.02,
        # hr
        use_feedback=False,
        use_real_feedback=False,
        only_use_hr_until=int(1e3),
        trans_to_rl_in=int(2e4),
        good_feedback_acc=0.7,
        bad_feedback_acc=0.7,
        # dqn training
        lr=5e-4,
        batch_size=32,
        dqn_epochs=3,
        train_freq=1,
        target_network_update_freq=500,
        learning_starts=1000,
        param_noise=True,
        gamma=1.0,
        # hr training
        feedback_lr=1e-3,
        feedback_epochs=4,
        feedback_batch_size=16,
        feedback_minibatch_size=8,
        min_feedback_buffer_size=32,
        feedback_training_prop=0.7,
        feedback_training_new_prop=0.4,
        # replay buffer
        buffer_size=50000,
        prioritized_replay=False,
        prioritized_replay_alpha=0.6,
        prioritized_replay_beta0=0.4,
        prioritized_replay_beta_iters=None,
        prioritized_replay_eps=1e-6,
        # rslts saving and others
        checkpoint_freq=10000,
        checkpoint_path=None,
        print_freq=100,
        load_path=None,
        callback=None,
        seed=0,
        **network_kwargs):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True, a prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None equals to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every steps with state of the algorithm.
        If callback returns true training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    # sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)
    hr_func = build_hr_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    observation_space.dtype = np.float32

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train_rl, train_hr, evaluate_hr, update_target, debug = build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        hr_func=hr_func,
        num_actions=env.action_space.n,
        rl_optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        hr_optimizer=tf.train.AdamOptimizer(learning_rate=feedback_lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'hr_func': hr_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)
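    # Illustrative note (hypothetical numbers, not in the original): LinearSchedule
    # interpolates linearly between initial_p and final_p over schedule_timesteps
    # and stays flat afterwards. With total_timesteps=100000,
    # exploration_fraction=0.1 and exploration_final_eps=0.02 (schedule_timesteps=10000):
    #   exploration.value(0)     -> 1.0
    #   exploration.value(5000)  -> 0.51
    #   exploration.value(10000) -> 0.02   (and 0.02 for every later step)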

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    obs, cor = obs['obs'], obs['nonviz_sensor']
    reset = True

    if use_feedback and use_real_feedback:
        import pylsl
        print("looking for an EEG_Pred stream...", end="", flush=True)
        feedback_LSL_stream = pylsl.StreamInlet(
            pylsl.resolve_stream('type', 'EEG_Pred')[0])
        print(" done")

    target_position = raw_env.robot.get_target_position()
    if use_2D_env:
        judge_action, *_ = run_dijkstra(raw_env, target_position)
    else:
        judge_action = judge_action_1D(raw_env, target_position)

    state_action_buffer = deque(maxlen=100)
    action_idx_buffer = deque(maxlen=100)
    feedback_buffer_train = []
    feedback_buffer_valid = []
    performance = {"feedback": [], "sparse_reward": [], "rich_reward": []}
    epi_feedback_test_num = 0

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}

            if use_feedback:
                update_rl_importance = (t - only_use_hr_until) / trans_to_rl_in
                update_rl_importance = np.clip(update_rl_importance, 0, 1)
                kwargs['update_rl_importance'] = update_rl_importance
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(
                    t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs[
                    'update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
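            # Illustrative note (hypothetical numbers, not in the original): with
            # exploration.value(t) = 0.1 and env.action_space.n = 4, the threshold is
            #   -log(1 - 0.1 + 0.1/4) = -log(0.925), which is about 0.078,
            # i.e. the parameter-noise scale is adapted so the perturbed policy stays
            # roughly this far (in KL) from the unperturbed one.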

            action = act(np.array(obs)[None], update_eps=update_eps,
                         **kwargs)[0]
            env_action = action
            reset = False
            raw_env.action_idx = t

            new_obs, rewards_dict, done, _ = env.step(env_action)
            new_obs, new_cor = new_obs['obs'], new_obs['nonviz_sensor']

            sparse_reward = rewards_dict["sparse"]
            rich_reward = rewards_dict["rich"]
            rew = rich_reward if use_rich_reward else sparse_reward

            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            state_action_buffer.append([obs, action])
            action_idx_buffer.append(t)

            action_idxs, feedbacks, correct_feedbacks = \
                get_simulated_feedback([cor] if use_2D_env else [obs], [action], [t], judge_action,
                                       good_feedback_acc, bad_feedback_acc)

            performance["feedback"].extend(correct_feedbacks)
            performance["sparse_reward"].append(sparse_reward)
            performance["rich_reward"].append(rich_reward)

            obs, cor = new_obs, new_cor

            if use_feedback:
                if use_real_feedback:
                    feedbacks, action_idxs = get_feedback_from_LSL(
                        feedback_LSL_stream)
                feedback_epi_buffer = [
                    state_action_buffer[action_idx_buffer.index(a_idx)] +
                    [feedback]
                    for a_idx, feedback in zip(action_idxs, feedbacks)
                ]

                # add feedbacks into feedback replay buffer
                if feedback_epi_buffer:
                    epi_feedback_test_num += len(feedback_epi_buffer) * (
                        1 - feedback_training_prop)
                    epi_test_int = int(epi_feedback_test_num)
                    epi_feedback_test_num -= epi_test_int
                    epi_test_inds = np.random.choice(len(feedback_epi_buffer),
                                                     epi_test_int,
                                                     replace=False)
                    epi_train_inds = [
                        ind for ind in range(len(feedback_epi_buffer))
                        if ind not in epi_test_inds
                    ]
                    feedback_buffer_train.extend(
                        [feedback_epi_buffer[ind] for ind in epi_train_inds])
                    feedback_buffer_valid.extend(
                        [feedback_epi_buffer[ind] for ind in epi_test_inds])
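            # Illustrative note (hypothetical numbers, not in the original): the
            # fractional accumulator epi_feedback_test_num spreads the validation
            # split across episodes. With feedback_training_prop = 0.8 and 3 new
            # feedbacks, 3 * 0.2 = 0.6 is accumulated; once the running total
            # reaches 1.0, one feedback is diverted to feedback_buffer_valid and
            # the integer part is subtracted again.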

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                obs, cor = obs['obs'], obs['nonviz_sensor']
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                for _ in range(dqn_epochs):
                    if prioritized_replay:
                        experience = replay_buffer.sample(
                            batch_size, beta=beta_schedule.value(t))
                        (obses_t, actions, rewards, obses_tp1, dones, weights,
                         batch_idxes) = experience
                    else:
                        obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                            batch_size)
                        weights, batch_idxes = np.ones_like(rewards), None
                    td_errors = train_rl(obses_t, actions, rewards, obses_tp1,
                                         dones, weights)
                    if prioritized_replay:
                        new_priorities = np.abs(
                            td_errors) + prioritized_replay_eps
                        replay_buffer.update_priorities(
                            batch_idxes, new_priorities)
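            # Illustrative note (hypothetical numbers, not in the original): each
            # sampled transition's priority is set to |TD error| + eps, e.g. a TD
            # error of 0.5 with prioritized_replay_eps = 1e-6 gives a priority of
            # 0.500001; the buffer then samples transitions with probability
            # proportional to priority ** prioritized_replay_alpha.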

            # train feedback regressor
            if use_feedback and len(
                    feedback_buffer_train
            ) >= min_feedback_buffer_size and t <= only_use_hr_until:
                for i in range(feedback_epochs):

                    if i < feedback_epochs * feedback_training_new_prop:
                        inds = np.arange(
                            len(feedback_buffer_train) - feedback_batch_size,
                            len(feedback_buffer_train))
                    else:
                        inds = np.random.choice(len(feedback_buffer_train),
                                                feedback_batch_size,
                                                replace=False)

                    np.random.shuffle(inds)
                    for start in range(0, feedback_batch_size,
                                       feedback_minibatch_size):
                        end = start + feedback_minibatch_size
                        obses = np.asarray([
                            feedback_buffer_train[idx][0]
                            for idx in inds[start:end]
                        ])
                        actions = np.asarray([
                            feedback_buffer_train[idx][1]
                            for idx in inds[start:end]
                        ])
                        feedbacks = np.asarray([
                            feedback_buffer_train[idx][2]
                            for idx in inds[start:end]
                        ])
                        pred, loss = train_hr(obses, actions, feedbacks)

                obs_train = np.asarray(
                    [feedback[0] for feedback in feedback_buffer_train])
                actions_train = np.asarray(
                    [feedback[1] for feedback in feedback_buffer_train])
                feedbacks_train = np.asarray(
                    [feedback[2] for feedback in feedback_buffer_train])
                obs_valid = np.asarray(
                    [feedback[0] for feedback in feedback_buffer_valid])
                actions_valid = np.asarray(
                    [feedback[1] for feedback in feedback_buffer_valid])
                feedbacks_valid = np.asarray(
                    [feedback[2] for feedback in feedback_buffer_valid])
                train_acc, train_loss = evaluate_hr(obs_train, actions_train,
                                                    feedbacks_train)
                valid_acc, valid_loss = evaluate_hr(obs_valid, actions_valid,
                                                    feedbacks_valid)
                print(
                    "HR: train acc {:>4.2f}, loss {:>5.2f}; valid acc {:>4.2f}, loss {:>5.2f}"
                    .format(train_acc, train_loss, valid_acc, valid_loss))

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("% time spent exploring",
                                      int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts
                    and num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return act, performance
Example #18
def learn(*,
          network,
          env,
          total_timesteps,
          seed=None,
          eval_env=None,
          replay_strategy='future',
          policy_save_interval=5,
          clip_return=True,
          demo_file=None,
          override_params=None,
          load_path=None,
          save_path=None,
          **kwargs):

    override_params = override_params or {}
    if MPI is not None:
        rank = MPI.COMM_WORLD.Get_rank()
        num_cpu = MPI.COMM_WORLD.Get_size()
    else:
        # Fall back to a single-process setup so rank and num_cpu are always defined.
        rank = 0
        num_cpu = 1

    # Seed everything.
    rank_seed = seed + 1000000 * rank if seed is not None else None
    set_global_seeds(rank_seed)
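    # Illustrative note (hypothetical numbers, not in the original): the seed is
    # offset per MPI worker so runs are reproducible but not identical across
    # workers, e.g. seed=123 and rank=2 give rank_seed = 123 + 1000000 * 2 = 2000123.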

    # Prepare params.
    params = config.DEFAULT_PARAMS
    env_name = env.specs[0].id
    params['env_name'] = env_name
    params['replay_strategy'] = replay_strategy
    if env_name in config.DEFAULT_ENV_PARAMS:
        params.update(config.DEFAULT_ENV_PARAMS[env_name]
                      )  # merge env-specific parameters in
    params.update(
        **override_params)  # makes it possible to override any parameter
    with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
        json.dump(params, f)
    params = config.prepare_params(params)
    params['rollout_batch_size'] = env.num_envs

    if demo_file is not None:
        params['bc_loss'] = 1
    params.update(kwargs)

    config.log_params(params, logger=logger)

    if num_cpu == 1:
        logger.warn()
        logger.warn('*** Warning ***')
        logger.warn(
            'You are running HER with just a single MPI worker. This will work, but the '
            +
            'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) '
            +
            'were obtained with --num_cpu 19. This makes a significant difference and if you '
            +
            'are looking to reproduce those results, be aware of this. Please also refer to '
            +
            'https://github.com/openai/baselines/issues/314 for further details.'
        )
        logger.warn('****************')
        logger.warn()

    dims = config.configure_dims(params)
    policy = config.configure_ddpg(dims=dims,
                                   params=params,
                                   clip_return=clip_return)
    if load_path is not None:
        tf_util.load_variables(load_path)

    rollout_params = {
        'exploit': False,
        'use_target_net': False,
        'use_demo_states': True,
        'compute_Q': False,
        'T': params['T'],
    }

    eval_params = {
        'exploit': True,
        'use_target_net': params['test_with_polyak'],
        'use_demo_states': False,
        'compute_Q': True,
        'T': params['T'],
    }

    for name in [
            'T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps'
    ]:
        rollout_params[name] = params[name]
        eval_params[name] = params[name]

    eval_env = eval_env or env

    rollout_worker = RolloutWorker(env,
                                   policy,
                                   dims,
                                   logger,
                                   monitor=True,
                                   **rollout_params)
    evaluator = RolloutWorker(eval_env, policy, dims, logger, **eval_params)

    n_cycles = params['n_cycles']
    n_epochs = total_timesteps // n_cycles // rollout_worker.T // rollout_worker.rollout_batch_size
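    # Illustrative note (hypothetical numbers, not in the original): total_timesteps
    # is converted into HER epochs by integer division, e.g. total_timesteps=5000,
    # n_cycles=10, T=50 and rollout_batch_size=2 give
    #   n_epochs = 5000 // 10 // 50 // 2 = 5.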

    return train(save_path=save_path,
                 policy=policy,
                 rollout_worker=rollout_worker,
                 evaluator=evaluator,
                 n_epochs=n_epochs,
                 n_test_rollouts=params['n_test_rollouts'],
                 n_cycles=params['n_cycles'],
                 n_batches=params['n_batches'],
                 policy_save_interval=policy_save_interval,
                 demo_file=demo_file)
Example #19
def learn(env,
          use_ddpg=False,
          gamma=0.9,
          controller_kargs={},
          option_kargs={},
          seed=None,
          total_timesteps=100000,
          print_freq=100,
          callback=None,
          checkpoint_path=None,
          checkpoint_freq=10000,
          load_path=None,
          **others):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    use_ddpg: bool
        whether to use DDPG or DQN to learn the option's policies
    gamma: float
        discount factor
    controller_kargs
        arguments for learning the controller policy.
    option_kargs
        arguments for learning the option policies.
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    total_timesteps: int
        number of env steps to optimize for
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    load_path: str
        path to load the model from. (default: None)

    Returns
    -------
    act: ActWrapper (meta-controller)
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    act: ActWrapper (option policies)
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    controller = ControllerDQN(env, **controller_kargs)
    if use_ddpg:
        options = OptionDDPG(env, gamma, total_timesteps, **option_kargs)
    else:
        options = OptionDQN(env, gamma, total_timesteps, **option_kargs)
    option_s = None  # State where the option initiated
    option_id = None  # Id of the current option being executed
    option_rews = []  # Rewards obtained by the current option

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    options.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break

            # Selecting an option if needed
            if option_id is None:
                valid_options = env.get_valid_options()
                option_s = obs
                option_id = controller.get_action(option_s, valid_options)
                option_rews = []

            # Take action and update exploration to the newest value
            action = options.get_action(env.get_option_observation(option_id),
                                        t, reset)
            reset = False
            new_obs, rew, done, info = env.step(action)

            # Saving the real reward that the option is getting
            option_rews.append(rew)

            # Store transition for the option policies
            for _s, _a, _r, _sn, _done in env.get_experience():
                options.add_experience(_s, _a, _r, _sn, _done)

            # Learn and update the target networks if needed for the option policies
            options.learn(t)
            options.update_target_network(t)

            # Update the meta-controller if needed
            # Note that this condition always holds if done is True
            if env.did_option_terminate(option_id):
                option_sn = new_obs
                option_reward = sum(
                    [_r * gamma**_i for _i, _r in enumerate(option_rews)])
                valid_options = [] if done else env.get_valid_options()
                controller.add_experience(option_s, option_id, option_reward,
                                          option_sn, done, valid_options,
                                          gamma**(len(option_rews)))
                controller.learn()
                controller.update_target_network()
                controller.increase_step()
                option_id = None
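            # Illustrative note (hypothetical numbers, not in the original): the
            # option's rewards are compressed into one SMDP transition for the
            # meta-controller, e.g. option_rews = [0, 0, 1] with gamma = 0.9 gives
            #   option_reward = 0 + 0 * 0.9 + 1 * 0.81 = 0.81
            # and the controller discounts the next state by gamma ** 3 = 0.729.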

            obs = new_obs
            episode_rewards[-1] += rew

            if done:
                obs = env.reset()
                options.reset()
                episode_rewards.append(0.0)
                reset = True

            # General stats
            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.dump_tabular()

            if (checkpoint_freq is not None and num_episodes > 100
                    and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log(
                            "Saving model due to mean reward increase: {} -> {}"
                            .format(saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    return controller.act, options.act
Example #20
    def prepare_agent(_env,
                      eval_env,
                      active,
                      exploration='eps_greedy',
                      action_l2=None,
                      scope=None,
                      ss=False,
                      load_path=None):
        # Prepare params.
        _params = copy.deepcopy(config.DEFAULT_PARAMS)
        _kwargs = copy.deepcopy(kwargs)
        _override_params = copy.deepcopy(override_params)

        env_name = _env.spec.id
        _params['env_name'] = env_name
        _params['replay_strategy'] = replay_strategy
        _params['ss'] = ss
        if action_l2 is not None:
            _params['action_l2'] = action_l2
        if not active:
            _params["buffer_size"] = 1
        if env_name in config.DEFAULT_ENV_PARAMS:
            _params.update(config.DEFAULT_ENV_PARAMS[env_name]
                           )  # merge env-specific parameters in
        _params.update(
            **_override_params)  # makes it possible to override any parameter
        with open(os.path.join(logger.get_dir(), 'params.json'), 'w') as f:
            json.dump(_params, f)
        _params = config.prepare_params(_params)
        _params['rollout_batch_size'] = _env.num_envs

        if demo_file is not None:
            _params['bc_loss'] = 1
        _params.update(_kwargs)

        config.log_params(_params, logger=logger)

        if num_cpu == 1:
            logger.warn()
            logger.warn('*** Warning ***')
            logger.warn(
                'You are running HER with just a single MPI worker. This will work, but the '
                +
                'experiments that we report in Plappert et al. (2018, https://arxiv.org/abs/1802.09464) '
                +
                'were obtained with --num_cpu 19. This makes a significant difference and if you '
                +
                'are looking to reproduce those results, be aware of this. Please also refer to '
                +
                'https://github.com/openai/baselines/issues/314 for further details.'
            )
            logger.warn('****************')
            logger.warn()

        dims, coord_dict = config.configure_dims(_params)
        _params['ddpg_params']['scope'] = scope
        policy, reward_fun = config.configure_ddpg(dims=dims,
                                                   params=_params,
                                                   active=active,
                                                   clip_return=clip_return)
        if load_path is not None:
            tf_util.load_variables(load_path)
            print(f"Loaded model: {load_path}")

        rollout_params = {
            'exploit': False,
            'use_target_net': False,
            'use_demo_states': True,
            'compute_Q': False,
            'exploration': exploration
        }

        eval_params = {
            'exploit': True,
            'use_target_net': _params['test_with_polyak'],
            'use_demo_states': False,
            'compute_Q': True,
        }

        for name in [
                'T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps'
        ]:
            rollout_params[name] = _params[name]
            eval_params[name] = _params[name]

        eval_env = eval_env or _env

        rollout_worker = RolloutWorker(_env,
                                       policy,
                                       dims,
                                       logger,
                                       active,
                                       monitor=True,
                                       **rollout_params)
        evaluator = RolloutWorker(eval_env, policy, dims, logger, active,
                                  **eval_params)

        return policy, rollout_worker, evaluator, _params, coord_dict, reward_fun
Example #21
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimize for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns True, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    
    ############################## RL-S Prepare #############################################
    
    # model saved name
    saved_name = "0817"

    #####
    # Setup Training Record
    #####
    save_new_data = False
    create_new_file = False
    create_new_file_rule = create_new_file
    save_new_data_rule = save_new_data

    create_new_file_RL = False
    save_new_data_RL = save_new_data
    
    create_new_file_replay_buffer = False
    save_new_data_replay_buffer = save_new_data

    is_training = False
    trajectory_buffer = deque(maxlen=20)

    if create_new_file_replay_buffer:
        if osp.exists("recorded_replay_buffer.txt"):
            os.remove("recorded_replay_buffer.txt")
    else:
        replay_buffer_dataset = np.loadtxt("recorded_replay_buffer.txt")
        for data in replay_buffer_dataset:
            obs, action, rew, new_obs, done = _extract_data(data)
            replay_buffer.add(obs, action, rew, new_obs, done)

    recorded_replay_buffer_outfile = open("recorded_replay_buffer.txt","a")
    recorded_replay_buffer_format = " ".join(("%f",)*31)+"\n"
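    # Illustrative note (assumption, not stated in the original): the 31 columns per
    # line presumably hold one flattened transition, i.e. a 14-dimensional observation,
    # the action, the reward, the 14-dimensional next observation and the done flag
    # (14 + 1 + 1 + 14 + 1 = 31), matching the 14-dimensional states indexed by the
    # R-trees below.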
    
    #####
    # Setup Rule-based Record
    #####
    create_new_file_rule = True

    # create state database
    if create_new_file_rule:
        if osp.exists("state_index_rule.dat"):
            os.remove("state_index_rule.dat")
            os.remove("state_index_rule.idx")
        if osp.exists("visited_state_rule.txt"):
            os.remove("visited_state_rule.txt")
        if osp.exists("visited_value_rule.txt"):
            os.remove("visited_value_rule.txt")

        visited_state_rule_value = []
        visited_state_rule_counter = 0
    else:
        visited_state_rule_value = np.loadtxt("visited_value_rule.txt")
        visited_state_rule_value = visited_state_rule_value.tolist()
        visited_state_rule_counter = len(visited_state_rule_value)

    visited_state_rule_outfile = open("visited_state_rule.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_rule_outfile = open("visited_value_rule.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_rule_tree = rindex.Index('state_index_rule',properties=visited_state_tree_prop)
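    # Illustrative note (not in the original): each visited state is inserted into the
    # rtree index as a 14-dimensional box
    #   [state - visited_state_dist, state + visited_state_dist]
    # (28 interleaved min/max coordinates), so a later lookup can presumably use an
    # intersection query with the current state to find previously visited states whose
    # per-dimension distance is within visited_state_dist.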

    #####
    # Setup RL-based Record
    #####

    if create_new_file_RL:
        if osp.exists("state_index_RL.dat"):
            os.remove("state_index_RL.dat")
            os.remove("state_index_RL.idx")
        if osp.exists("visited_state_RL.txt"):
            os.remove("visited_state_RL.txt")
        if osp.exists("visited_value_RL.txt"):
            os.remove("visited_value_RL.txt")

    if create_new_file_RL:
        visited_state_RL_value = []
        visited_state_RL_counter = 0
    else:
        visited_state_RL_value = np.loadtxt("visited_value_RL.txt")
        visited_state_RL_value = visited_state_RL_value.tolist()
        visited_state_RL_counter = len(visited_state_RL_value)

    visited_state_RL_outfile = open("visited_state_RL.txt", "a")
    visited_state_format = " ".join(("%f",)*14)+"\n"

    visited_value_RL_outfile = open("visited_value_RL.txt", "a")
    visited_value_format = " ".join(("%f",)*2)+"\n"

    visited_state_tree_prop = rindex.Property()
    visited_state_tree_prop.dimension = 14
    visited_state_dist = np.array([[0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2, 10, 0.2, 2]])
    visited_state_RL_tree = rindex.Index('state_index_RL',properties=visited_state_tree_prop)


    ############################## RL-S Prepare End #############################################
    
    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action, q_function_cz = act(np.array(obs)[None], update_eps=update_eps, **kwargs)
            
            # RLS_action = generate_RLS_action(obs,q_function_cz,action,visited_state_rule_value,
            #                                 visited_state_rule_tree,visited_state_RL_value,
            #                                 visited_state_RL_tree,is_training)

            RLS_action = 0

            env_action = RLS_action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)

            ########### Record data in trajectory buffer and local file, but not in replay buffer ###########

            trajectory_buffer.append((obs, action, float(rew), new_obs, float(done)))

            # Store transition in the replay buffer.
            # replay_buffer.add(obs, action, rew, new_obs, float(done))

            obs = new_obs
            episode_rewards[-1] += rew # safe driving is 1, collision is 0


            while len(trajectory_buffer)>10:
                # if the agent drives safely for 10 steps (configurable), the state is regarded as safe
                obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                # save this state in local replay buffer file
                if save_new_data_replay_buffer:
                    recorded_data = _wrap_data(obs_left, action_left, rew_left, new_obs_left, done_left)
                    recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                # put this state in replay buffer
                replay_buffer.add(obs_left[0], action_left, float(rew_left), new_obs_left[0], float(done_left))
                action_to_record = action_left
                r_to_record = rew_left
                obs_to_record = obs_left

                # save this state in rule-based or RL-based visited state
                if action_left == 0:
                    if save_new_data_rule:
                        visited_state_rule_value.append([action_to_record,r_to_record])
                        visited_state_rule_tree.insert(visited_state_rule_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_rule_counter += 1
                else:
                    if save_new_data_RL:
                        visited_state_RL_value.append([action_to_record,r_to_record])
                        visited_state_RL_tree.insert(visited_state_RL_counter,
                            tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                        visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                        visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                        visited_state_RL_counter += 1

            ################# Record data end ########################
            
            
            if done:
                """ 
                Get collision or out of multilane map
                """
                ####### Record the trajectory data and add data in replay buffer #########
                _, _, rew_right, _, _ = trajectory_buffer[-1]

                while len(trajectory_buffer)>0:
                    obs_left, action_left, rew_left, new_obs_left, done_left = trajectory_buffer.popleft()
                    action_to_record = action_left
                    r_to_record = (rew_right-rew_left)*gamma**len(trajectory_buffer) + rew_left
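                    # Illustrative note (hypothetical numbers, not in the original):
                    # the terminal reward is backed up along the remaining trajectory
                    # with discounting, e.g. rew_left = 1.0, rew_right = 0.0 (episode
                    # ended badly), gamma = 0.9 and 3 steps still in the buffer give
                    #   r_to_record = (0.0 - 1.0) * 0.9 ** 3 + 1.0 = 0.271.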
                    # record in local replay buffer file
                    if save_new_data_replay_buffer:
                        obs_to_record = obs_left
                        recorded_data = _wrap_data(obs_left, action_left, r_to_record, new_obs_left, done_left)
                        recorded_replay_buffer_outfile.write(recorded_replay_buffer_format % tuple(recorded_data))
                    # record in replay buffer for training
                    replay_buffer.add(obs_left[0], action_left, float(r_to_record), new_obs_left[0], float(done_left))

                    # save visited rule/RL state data in local file
                    if action_left == 0:
                        if save_new_data_rule:
                            visited_state_rule_value.append([action_to_record,r_to_record])
                            visited_state_rule_tree.insert(visited_state_rule_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_rule_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_rule_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_rule_counter += 1
                    else:
                        if save_new_data_RL:
                            visited_state_RL_value.append([action_to_record,r_to_record])
                            visited_state_RL_tree.insert(visited_state_RL_counter,
                                tuple((obs_to_record-visited_state_dist).tolist()[0]+(obs_to_record+visited_state_dist).tolist()[0]))
                            visited_state_RL_outfile.write(visited_state_format % tuple(obs_to_record[0]))
                            visited_value_RL_outfile.write(visited_value_format % tuple([action_to_record,r_to_record]))
                            visited_state_RL_counter += 1

                ####### Recorded #####

                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            ############### Training Part Start #####################
            if not is_training:
                # don't need to train the model
                continue

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward

                    rew_str = str(mean_100ep_reward)
                    path = osp.expanduser("~/models/carlaok_checkpoint/"+saved_name+"_"+rew_str)
                    act.save(path)

        #### close the file ####
        visited_state_rule_outfile.close()
        visited_value_rule_outfile.close()
        recorded_replay_buffer_outfile.close()
        if not is_training:
            # testing_record_outfile is never opened in this snippet; guard the close
            # so evaluation-only runs do not fail with a NameError.
            if 'testing_record_outfile' in locals():
                testing_record_outfile.close()
        #### close the file ###

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
Example #22
def learn(env,
          network,
          seed=None,
          lr=5e-4,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          callback=None,
          load_path=None,
          **network_kwargs
            ):
    """Train a deepq model.

    Parameters
    -------
    env: gym.Env
        environment to train on
    network: string or a function
        neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
        (mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
        will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
    seed: int or None
        prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
    lr: float
        learning rate for adam optimizer
    total_timesteps: int
        number of env steps to optimizer for
    buffer_size: int
        size of the replay buffer
    exploration_fraction: float
        fraction of entire training period over which the exploration rate is annealed
    exploration_final_eps: float
        final value of random action probability
    train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from replay buffer for training
    print_freq: int
        how often to print out training progress
        set to None to disable printing
    checkpoint_freq: int
        how often to save the model. This is so that the best version is restored
        at the end of the training. If you do not wish to restore the best version at
        the end of the training set this variable to None.
    learning_starts: int
        how many steps of the model to collect transitions for before learning starts
    gamma: float
        discount factor
    target_network_update_freq: int
        update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
        if True prioritized replay buffer will be used.
    prioritized_replay_alpha: float
        alpha parameter for prioritized replay buffer
    prioritized_replay_beta0: float
        initial value of beta for prioritized replay buffer
    prioritized_replay_beta_iters: int
        number of iterations over which beta will be annealed from initial value
        to 1.0. If set to None, it defaults to total_timesteps.
    prioritized_replay_eps: float
        epsilon to add to the TD errors when updating priorities.
    param_noise: bool
        whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
    callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If the callback returns True, training stops.
    load_path: str
        path to load the model from. (default: None)
    **network_kwargs
        additional keyword arguments to pass to the network builder.

    Returns
    -------
    act: ActWrapper
        Wrapper over act function. Adds ability to save it and load it.
        See header of baselines/deepq/categorical.py for details on the act function.
    """
    # Create all the functions necessary to train the model

    sess = get_session()
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    # capture the shape outside the closure so that the env object is not serialized
    # by cloudpickle when serializing make_obs_ph

    observation_space = env.observation_space
    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    act, train, update_target, debug = deepq.build_train(
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        param_noise=param_noise
    )

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None
    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()
    reset = True

    with tempfile.TemporaryDirectory() as td:
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))


        for t in range(total_timesteps):
            if callback is not None:
                if callback(locals(), globals()):
                    break
            # Take action and update exploration to the newest value
            kwargs = {}
            if not param_noise:
                update_eps = exploration.value(t)
                update_param_noise_threshold = 0.
            else:
                update_eps = 0.
                # Compute the threshold such that the KL divergence between perturbed and non-perturbed
                # policy is comparable to eps-greedy exploration with eps = exploration.value(t).
                # See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
                # for detailed explanation.
                update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
                kwargs['reset'] = reset
                kwargs['update_param_noise_threshold'] = update_param_noise_threshold
                kwargs['update_param_noise_scale'] = True
            action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
            env_action = action
            reset = False
            new_obs, rew, done, _ = env.step(env_action)
            # Store transition in the replay buffer.
            replay_buffer.add(obs, action, rew, new_obs, float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                episode_rewards.append(0.0)
                reset = True

            if t > learning_starts and t % train_freq == 0:
                # Minimize the error in Bellman's equation on a batch sampled from replay buffer.
                if prioritized_replay:
                    experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes, new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
                logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
                logger.dump_tabular()

            if (checkpoint_freq is not None and t > learning_starts and
                    num_episodes > 100 and t % checkpoint_freq == 0):
                if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
                    if print_freq is not None:
                        logger.log("Saving model due to mean reward increase: {} -> {}".format(
                                   saved_mean_reward, mean_100ep_reward))
                    save_variables(model_file)
                    model_saved = True
                    saved_mean_reward = mean_100ep_reward
        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
            load_variables(model_file)

    return act
Example #23
0
def learn(env,
          network,
          seed=None,
          lr=1e-3,
          total_timesteps=100000,
          buffer_size=50000,
          exploration_fraction=0.1,
          exploration_final_eps=0.02,
          train_freq=1,
          batch_size=32,
          print_freq=100,
          checkpoint_freq=10000,
          checkpoint_path=None,
          learning_starts=1000,
          gamma=1.0,
          target_network_update_freq=500,
          prioritized_replay=False,
          prioritized_replay_alpha=0.6,
          prioritized_replay_beta0=0.4,
          prioritized_replay_beta_iters=None,
          prioritized_replay_eps=1e-6,
          param_noise=False,
          num_cpu=5,
          callback=None,
          scope='co_deepq',
          pilot_tol=0,
          pilot_is_human=False,
          reuse=False,
          load_path=None,
          **network_kwargs):
    # Create all the functions necessary to train the model

    sess = get_session()  #tf.Session(graph=tf.Graph())
    set_global_seeds(seed)

    q_func = build_q_func(network, **network_kwargs)

    observation_space = env.observation_space

    def make_obs_ph(name):
        return ObservationInput(observation_space, name=name)

    using_control_sharing = True  #pilot_tol > 0

    if pilot_is_human:
        utils.human_agent_action = init_human_action()
        utils.human_agent_active = False

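    # Descriptive comment (added): build the act/train/update_target/debug ops
    # for the control-sharing ("co") variant of deepq.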
    act, train, update_target, debug = co_build_train(
        scope=scope,
        make_obs_ph=make_obs_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        reuse=tf.AUTO_REUSE if reuse else False,
        using_control_sharing=using_control_sharing)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    act = ActWrapper(act, act_params)

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = total_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    episode_outcomes = []
    saved_mean_reward = None
    obs = env.reset()
    reset = True
    prev_t = 0
    rollouts = []

    if not using_control_sharing:
        exploration = LinearSchedule(schedule_timesteps=int(
            exploration_fraction * total_timesteps),
                                     initial_p=1.0,
                                     final_p=exploration_final_eps)

    with tempfile.TemporaryDirectory() as td:
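        # Descriptive comment (added): checkpoint into checkpoint_path if
        # provided, otherwise into this temporary directory (discarded after
        # training).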
        td = checkpoint_path or td

        model_file = os.path.join(td, "model")
        model_saved = False

        if tf.train.latest_checkpoint(td) is not None:
            load_variables(model_file)
            logger.log('Loaded model from {}'.format(model_file))
            model_saved = True
        elif load_path is not None:
            load_variables(load_path)
            logger.log('Loaded model from {}'.format(load_path))

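        # Descriptive comment (added): main training loop — query the pilot's
        # action, let the copilot act under control sharing, store the
        # transition, and train periodically.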
        for t in range(total_timesteps):
            masked_obs = mask_helipad(obs)

            act_kwargs = {}
            if using_control_sharing:
                if pilot_is_human:
                    act_kwargs['pilot_action'] = env.unwrapped.pilot_policy(
                        obs[None, :9])
                else:
                    act_kwargs['pilot_action'] = env.unwrapped.pilot_policy.step(
                        obs[None, :9])
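                # Descriptive comment (added): use the configured pilot tolerance
                # only when the pilot is scripted, or when the human pilot is
                # actively giving input.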
                act_kwargs['pilot_tol'] = pilot_tol if not pilot_is_human or (
                    pilot_is_human and utils.human_agent_active) else 0
            else:
                act_kwargs['update_eps'] = exploration.value(t)

            #action = act(masked_obs[None, :], **act_kwargs)[0][0]
            action = act(np.array(masked_obs)[None], **act_kwargs)[0][0]
            env_action = action
            reset = False
            new_obs, rew, done, info = env.step(env_action)

            if pilot_is_human:
                env.render()

            # Store transition in the replay buffer.
            masked_new_obs = mask_helipad(new_obs)
            replay_buffer.add(masked_obs, action, rew, masked_new_obs,
                              float(done))
            obs = new_obs

            episode_rewards[-1] += rew
            if done:
                obs = env.reset()
                # Record the terminal reward so the success/crash rates below can
                # be computed from episode outcomes.
                episode_outcomes.append(rew)
                episode_rewards.append(0.0)
                reset = True

                if pilot_is_human:
                    utils.human_agent_action = init_human_action()
                    utils.human_agent_active = False
                    time.sleep(2)

            if t > learning_starts and t % train_freq == 0:
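                # Minimize the error in Bellman's equation on a batch sampled from
                # the replay buffer.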
                if prioritized_replay:
                    experience = replay_buffer.sample(
                        batch_size, beta=beta_schedule.value(t))
                    (obses_t, actions, rewards, obses_tp1, dones, weights,
                     batch_idxes) = experience
                else:
                    obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                        batch_size)
                    weights, batch_idxes = np.ones_like(rewards), None
                td_errors = train(obses_t, actions, rewards, obses_tp1, dones,
                                  weights)

                if prioritized_replay:
                    new_priorities = np.abs(td_errors) + prioritized_replay_eps
                    replay_buffer.update_priorities(batch_idxes,
                                                    new_priorities)

            if t > learning_starts and t % target_network_update_freq == 0:
                # Update target network periodically.
                update_target()

            mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
            mean_100ep_succ = round(
                np.mean([1 if x == 100 else 0 for x in episode_outcomes[-101:-1]]), 2)
            mean_100ep_crash = round(
                np.mean([1 if x == -100 else 0 for x in episode_outcomes[-101:-1]]), 2)
            num_episodes = len(episode_rewards)
            if done and print_freq is not None and len(
                    episode_rewards) % print_freq == 0:
                logger.record_tabular("steps", t)
                logger.record_tabular("episodes", num_episodes)
                logger.record_tabular("mean 100 episode reward",
                                      mean_100ep_reward)
                logger.record_tabular("mean 100 episode succ", mean_100ep_succ)
                logger.record_tabular("mean 100 episode crash",
                                      mean_100ep_crash)
                logger.dump_tabular()

            if checkpoint_freq is not None and t > learning_starts and num_episodes > 100 and t % checkpoint_freq == 0 and (
                    saved_mean_reward is None
                    or mean_100ep_reward > saved_mean_reward):
                if print_freq is not None:
                    logger.log(
                        "Saving model due to mean reward increase: {} -> {}".
                        format(saved_mean_reward, mean_100ep_reward))
                save_variables(model_file)
                model_saved = True
                saved_mean_reward = mean_100ep_reward

        if model_saved:
            if print_freq is not None:
                logger.log("Restored model with mean reward: {}".format(
                    saved_mean_reward))
            load_variables(model_file)

    reward_data = {'rewards': episode_rewards, 'outcomes': episode_outcomes}

    return act, reward_data