Code example #1
File: policies.py Project: hjp-muser/RL_Snake_Robot
def build_policy(env,
                 policy_network,
                 estimate_q=True,
                 q_network=None,
                 **network_kwargs):
    if isinstance(policy_network, str):
        policy_network = get_network_builder(policy_network)(**network_kwargs)
    else:
        assert callable(policy_network)

    if estimate_q:
        # The architecture of q_network is the same as policy_network's.
        if q_network is None:
            q_network = get_network_builder("mlp")(**network_kwargs)
        elif isinstance(q_network, str):
            q_network = get_network_builder(q_network)(**network_kwargs)
        else:
            assert callable(q_network)

    def policy_fn(obs_ph=None,
                  normalize_observations=False,
                  vf_latent=None,
                  sess=None):
        # preprocess input
        ob_space = env.observation_space
        X = obs_ph if obs_ph is not None else observation_placeholder(ob_space)
        extra_tensors = {}
        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X
        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)

        policy = PolicyModel(env=env,
                             observations=X,
                             policy_latent=policy_latent,
                             vf_latent=vf_latent,
                             estimate_q=estimate_q,
                             q_network=q_network,
                             sess=sess,
                             **extra_tensors)
        return policy

    return policy_fn
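
The build_policy variants collected here all follow the same factory pattern: get_network_builder resolves a network name into a builder, and the returned policy_fn constructs the policy graph on demand. Below is a minimal sketch of that first step, assuming the TF1 branch of baselines; the observation size and hyperparameters are illustrative only.

import tensorflow as tf
from baselines.common.models import get_network_builder

# Resolve a registered network name into a builder, then into a network function.
network_fn = get_network_builder('mlp')(num_layers=2, num_hidden=64, activation=tf.tanh)

# Feeding an observation placeholder through it yields the latent features that
# PolicyModel / PolicyWithValue consume. Depending on the baselines version, the call
# returns either a latent tensor or a (latent, recurrent_tensors) tuple.
obs_ph = tf.placeholder(tf.float32, shape=(None, 8))
latent = network_fn(obs_ph)
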
Code example #2
def build_policy(observation_space, action_space, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
        ob_space = observation_space

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X

        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)


        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x)

        policy = PolicyWithValue(
            observation_space=observation_space,
            action_space=action_space,
            observations=X,
            latent=policy_latent,
            vf_latent=vf_latent,
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors
        )
        return policy

    return policy_fn
Code example #3
def build_value(observation_space, value_network, **network_kwargs):
    if isinstance(value_network, str):
        value_network = get_network_builder(value_network)(**network_kwargs)
    else:
        assert callable(value_network)

    def value_fn(obs_ph=None, normalize_observations=False, sess=None):
        ob_space = observation_space
        X = obs_ph if obs_ph is not None else observation_placeholder(ob_space)
        extra_tensors = {}
        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X
        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
            vf_latent = value_network(encoded_x)

        value = ValueModel(observations=X,
                           latent=vf_latent,
                           sess=sess,
                           **extra_tensors)
        return value

    return value_fn
Code example #4
File: models.py Project: a1625395374/RL_assembly
def build_q_func(network,
                 hiddens=[256],
                 dueling=True,
                 layer_norm=False,
                 **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError(
                        "DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out,
                                                        num_outputs=hidden,
                                                        activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out,
                                                       center=True,
                                                       scale=True)
                    action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out,
                                                       num_outputs=num_actions,
                                                       activation_fn=None)

            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    for hidden in hiddens:
                        state_out = layers.fully_connected(state_out,
                                                           num_outputs=hidden,
                                                           activation_fn=None)
                        if layer_norm:
                            state_out = layers.layer_norm(state_out,
                                                          center=True,
                                                          scale=True)
                        state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out,
                                                         num_outputs=1,
                                                         activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(
                    action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            return q_out

    return q_func_builder
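
For reference, the q_func_builder returned above is what deepq wires into its TF1 graph. A hedged usage sketch follows; the network name, observation shape, and action count are illustrative assumptions.

import tensorflow as tf

# Hypothetical usage of the factory above (TF1-style graph construction).
q_func = build_q_func('mlp', hiddens=[256], dueling=True, num_layers=2, num_hidden=64)
obs_ph = tf.placeholder(tf.float32, shape=(None, 4))      # e.g. a 4-dimensional observation
q_values = q_func(obs_ph, num_actions=6, scope='q_func')  # tensor of shape (None, 6)
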
Code example #5
File: policies.py Project: MrGoogol/baselines
def build_policy(env, policy_network, value_network=None, normalize_observations=False, estimate_q=False, **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None, nsteps=None, sess=None, observ_placeholder=None):
        ob_space = env.observation_space

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X

        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)


        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x)

        policy = PolicyWithValue(
            env=env,
            observations=X,
            latent=policy_latent,
            vf_latent=vf_latent,
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors
        )
        return policy

    return policy_fn
Code example #6
    def __init__(self, config, section):
        network_args = {}
        if config.has_option(section, 'nlstm'):
            network_args['nlstm'] = config.getint(section, 'nlstm')
        if config.has_option(section, 'layer_norm'):
            network_args['layer_norm'] = config.getboolean(section, 'layer_norm')
        self._network_fn = get_network_builder('lstm')(**network_args)
Code example #7
File: models.py Project: grockious/deepsynth
    def __init__(self, nb_actions, ob_shape, name='critic', network='mlp', **network_kwargs):
        super().__init__(name=name, network=network, **network_kwargs)
        self.layer_norm = True
        self.network_builder = get_network_builder(network)(**network_kwargs)((ob_shape[0] + nb_actions,))
        self.output_layer = tf.keras.layers.Dense(
            units=1,
            kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
            name='output')
        _ = self.output_layer(self.network_builder.outputs[0])
Code example #8
File: models.py Project: grockious/deepsynth
    def __init__(self, nb_actions, ob_shape, name='actor', network='mlp', **network_kwargs):
        super().__init__(name=name, network=network, **network_kwargs)
        self.nb_actions = nb_actions
        self.network_builder = get_network_builder(network)(**network_kwargs)(ob_shape)
        self.output_layer = tf.keras.layers.Dense(
            units=self.nb_actions,
            activation=tf.keras.activations.tanh,
            kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3))
        _ = self.output_layer(self.network_builder.outputs[0])
Code example #9
def learn_letters(agent, env):
    if agent == "dqn":
        model = deepq.learn(
            env,
            "mlp",
            num_layers=4,
            num_hidden=128,
            activation=tf.tanh,  # tf.nn.relu
            hiddens=[128],
            dueling=True,
            lr=1e-5,
            total_timesteps=int(1e7),
            buffer_size=100000,
            batch_size=32,
            exploration_fraction=0.1,
            exploration_final_eps=0.1,  #0.01, -> testing...
            train_freq=1,
            learning_starts=10000,
            target_network_update_freq=100,
            gamma=0.9,
            print_freq=50)

    elif "ppo" in agent:
        mlp_net = get_network_builder("mlp")(num_layers=5,
                                             num_hidden=128,
                                             activation=tf.tanh)  # tf.nn.relu
        ppo_params = dict(
            nsteps=128,
            ent_coef=0.01,
            vf_coef=0.5,
            max_grad_norm=0.5,
            lr=1e-4,
            gamma=0.99,  # Note that my results over the red/blue doors were computed using gamma=0.9!
            lam=0.95,
            log_interval=50,
            nminibatches=8,
            noptepochs=1,
            #save_interval=100,
            cliprange=0.2)
        if "lstm" in agent:
            # Adding a recurrent layer
            ppo_params["network"] = 'cnn_lstm'
            ppo_params["nlstm"] = 128
            ppo_params["conv_fn"] = mlp_net
            ppo_params["lr"] = 0.001
        else:
            # Using a standard MLP
            ppo_params["network"] = mlp_net

        timesteps = int(1e9)

        model = ppo2.learn(env=env, total_timesteps=timesteps, **ppo_params)
    else:
        assert False, agent + " hasn't been implemented yet"

    return model
Code example #10
File: models.py Project: grockious/deepsynth
def build_q_func(network,
                 hiddens=[256],
                 dueling=True,
                 layer_norm=False,
                 **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_shape, num_actions):
        # the sub Functional model which does not include the top layer.
        model = network(input_shape)

        # wrapping the sub Functional model with layers that compute action scores into another Functional model.
        latent = model.outputs
        if len(latent) > 1:
            if latent[1] is not None:
                raise NotImplementedError(
                    "DQN is not compatible with recurrent policies yet")
        latent = latent[0]

        latent = tf.keras.layers.Flatten()(latent)

        with tf.name_scope("action_value"):
            action_out = latent
            for hidden in hiddens:
                action_out = tf.keras.layers.Dense(units=hidden,
                                                   activation=None)(action_out)
                if layer_norm:
                    action_out = tf.keras.layers.LayerNormalization(
                        center=True, scale=True)(action_out)
                action_out = tf.nn.relu(action_out)
            action_scores = tf.keras.layers.Dense(units=num_actions,
                                                  activation=None)(action_out)

        if dueling:
            with tf.name_scope("state_value"):
                state_out = latent
                for hidden in hiddens:
                    state_out = tf.keras.layers.Dense(
                        units=hidden, activation=None)(state_out)
                    if layer_norm:
                        state_out = tf.keras.layers.LayerNormalization(
                            center=True, scale=True)(state_out)
                    state_out = tf.nn.relu(state_out)
                state_score = tf.keras.layers.Dense(units=1,
                                                    activation=None)(state_out)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(
                action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return tf.keras.Model(inputs=model.inputs, outputs=[q_out])

    return q_func_builder
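
This deepsynth variant targets the TF2/Keras branch of baselines, so the builder takes an input shape and returns a tf.keras.Model rather than wiring tensors into a graph. A small sketch under that assumption; the shape and action count are illustrative.

import numpy as np

# Hypothetical usage of the Keras-style factory above.
q_func = build_q_func('mlp', hiddens=[256], dueling=True, num_layers=2, num_hidden=64)
q_model = q_func(input_shape=(4,), num_actions=6)       # returns a tf.keras.Model
q_values = q_model(np.zeros((1, 4), dtype=np.float32))  # tensor of shape (1, 6)
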
Code example #11
    def __init__(self, env, nbatch, network, **policy_kwargs):
        ob_space = env.observation_space
        self.X = observation_placeholder(ob_space, batch_size=nbatch)
        encoded_x = encode_observation(ob_space, self.X)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            self.net = get_network_builder(network)(**policy_kwargs)
            self.h1 = self.net(encoded_x)
        self.h2 = fc(self.h1, 'vf', 1)
        self.out = self.h2[:, 0]
Code example #12
def build_policy(ob_space, ac_space, policy_network, normalize_observations=False,
                 sess=None, train=True, beta=1.0, l2=0., lr=0.001,
                 init_scale=0.01, init_bias=0.0, trainable_variance=True, state_dependent_variance=True,
                 trainable_bias=True,
                 init_logstd=0., clip=None, **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(scope_name="pi", nbatch=None, nsteps=None, sess=sess, observ_placeholder=None):

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X

        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent, recurrent_tensors = policy_network(encoded_x)

            if recurrent_tensors is not None:
                # recurrent architecture, need a few more steps
                nenv = nbatch // nsteps
                assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(nbatch, nsteps)
                policy_latent, recurrent_tensors = policy_network(encoded_x, nenv)
                extra_tensors.update(recurrent_tensors)

        policy = Policy(
            observations=X,
            action_space=ac_space,
            latent=policy_latent,
            sess=sess,
            train=train,
            beta=beta,
            l2=l2,
            lr=lr,
            init_scale=init_scale,
            init_bias=init_bias,
            trainable_variance=trainable_variance,
            trainable_bias=trainable_bias,
            init_logstd=init_logstd,
            scope_name=scope_name,
            state_dependent_variance=state_dependent_variance,
            clip=clip,
            **extra_tensors
        )
        return policy

    return policy_fn
Code example #13
File: MyNN.py Project: yoniosin/A2C_new
def prio_network_builder(env, nbatch, nsteps, nenvs, network_type,
                         **policy_kwargs):
    network = get_network_builder(network_type)(**policy_kwargs)

    def prio_net_fn(nbatch=None, nsteps=None, sess=None):
        ob_space = env.observation_space

        X = observation_placeholder(ob_space, batch_size=nbatch)
        return MyNN

    return prio_net_fn()
Code example #14
    def __init__(self, config, section):
        network_args = {}
        if config.has_option(section, 'num_layers'):
            network_args['num_layers'] = config.getint(section, 'num_layers')
        if config.has_option(section, 'num_hidden'):
            network_args['num_hidden'] = config.getint(section, 'num_hidden')
        if config.has_option(section, 'activation'):
            network_args['activation'] = eval(config.get(section, 'activation'))
        if config.has_option(section, 'layer_norm'):
            network_args['layer_norm'] = config.getboolean(section, 'layer_norm')
        self._network_fn = get_network_builder('mlp')(**network_args)
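
These config-driven constructors read plain configparser options. Below is a hypothetical INI section matching the keys read above; the section name and values are assumptions, and the activation string is later resolved with eval(), so it must name something importable such as tf.tanh.

import configparser

config = configparser.ConfigParser()
config.read_string("""
[network]
num_layers = 3
num_hidden = 256
activation = tf.tanh
layer_norm = true
""")
# The constructor above would then pass these values to get_network_builder('mlp').
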
Code example #15
File: policies.py Project: maximilianigl/rl-iter
def build_policy(env,
                 policy_network,
                 arch,
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None):
        ob_space = env.observation_space

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(
            ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X

        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent, policy_latent_mean, info_loss = policy_network(
                encoded_x)
            if isinstance(policy_latent, tuple):
                raise NotImplementedError()

        policy = PolicyWithValue(
            env=env,
            observations=X,
            arch=arch,
            latent=policy_latent,
            latent_mean=policy_latent_mean,
            info_loss=info_loss,
            # vf_latent=vf_latent,
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors)
        return policy

    return policy_fn
Code example #16
File: policies.py Project: guikarist/rl-asc
def build_policy(env,
                 policy_network='',
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None, nsteps=None, sess=None):
        ob_space = env.observation_space

        Xs = tf.stack([observation_placeholder(ob_space, batch_size=nbatch)] * 3)

        extra_tensors = {}

        encoded_x_0 = encode_observation(ob_space, Xs[0])
        encoded_x_1 = encode_observation(ob_space, Xs[1])
        encoded_x_2 = encode_observation(ob_space, Xs[2])

        with tf.variable_scope('pi'):
            _, f_features_0 = policy_network(encoded_x_0)
        with tf.variable_scope('pi', reuse=True):
            policy_latent, f_features_1 = policy_network(encoded_x_1)
        with tf.variable_scope('pi', reuse=True):
            _, f_features_2 = policy_network(encoded_x_2)

        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            raise NotImplementedError

        policy = ModifiedPolicyWithValue(
            env=env,
            observations=Xs,
            latent=policy_latent,
            f_features=[f_features_0, f_features_1, f_features_2],
            vf_latent=vf_latent,
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors)
        return policy

    return policy_fn
Code example #17
def build_qvalue(vf_model, actor_model, qvalue_network, **network_kwargs):
    if isinstance(qvalue_network, str):
        qvalue_network = get_network_builder(qvalue_network)(**network_kwargs)
    else:
        assert callable(qvalue_network)

    def qvalue_fn(sess=None):
        qf_input = tf.concat([vf_model.vf_latent, actor_model.pd_param],
                             axis=-1)
        with tf.variable_scope('qf', reuse=tf.AUTO_REUSE):
            qf_latent = qvalue_network(qf_input)
        qvalue = QValueModel(vf_model=vf_model,
                             actor_model=actor_model,
                             latent=qf_latent,
                             sess=sess)
        return qvalue

    return qvalue_fn
Code example #18
File: models.py Project: MrGoogol/baselines
def build_q_func(network, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out, center=True, scale=True)
                    action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    for hidden in hiddens:
                        state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                        if layer_norm:
                            state_out = layers.layer_norm(state_out, center=True, scale=True)
                        state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            return q_out

    return q_func_builder
Code example #19
def build_hr_func(network, hiddens=[16], layer_norm=False, **network_kwargs):
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        network = get_network_builder(network)(**network_kwargs)

    def hr_func_builder(input_placeholder,
                        num_actions,
                        scope="hr_func",
                        reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError(
                        "HR is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("feedback_predictor"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out,
                                                        num_outputs=hidden,
                                                        activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out,
                                                       center=True,
                                                       scale=True)
                    action_out = tf.nn.relu(action_out)
                predicted_feedback = layers.fully_connected(
                    action_out,
                    num_outputs=num_actions,
                    activation_fn=tf.nn.sigmoid)

            return predicted_feedback

    return hr_func_builder
Code example #20
def build_policy(env,
                 policy_network,
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 limit_act_range=False,
                 **policy_kwargs):
    """Daniel: builds the policy and value network.

    When calling `get_network_builder`, we look at the provided models, but
    these will give us 'latent' features. We can combine models together, and
    then if we do that (or if not) the last layer *before* the last dense layer
    is considered to be the 'latent' one. For example, if we call mlp, by
    default we get two hidden layers with tanh, so we have:

        input --> 64 --> tanh --> 64 --> tanh

    and then there is a 64 dimensional tensor (see `policy_latent`) that we
    pass as input to the next layer(s). If you're wondering how we get to the
    action dimension, look at `baselines.common.distributions` and see
    `pdfromlatent` methods. They do a final 'matching' dense layer, but with no
    activation by default. To avoid that, we need to form a new layer with the
    correct output dimensions based on the environment's action space.

    To debug network construction, use:
        tf_util.display_var_info(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES))

    This is a bit tricky: build_policy returns the inner function, which in turn
    constructs the policy via the class above, exposing `self.action`, `self.pi`,
    etc., for use.
    """
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None):
        ob_space = env.observation_space
        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(
            ob_space, batch_size=nbatch)

        extra_tensors = {}
        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X
        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(
                        nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(
                        encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)

        _v_net = value_network
        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x)

        # Daniel: doing this for action range.
        if limit_act_range:
            policy_latent = tf.nn.tanh(
                fc(policy_latent,
                   'pi',
                   env.action_space.shape[0],
                   init_scale=0.01,
                   init_bias=0.0))

        policy = PolicyWithValue(env=env,
                                 observations=X,
                                 latent=policy_latent,
                                 vf_latent=vf_latent,
                                 sess=sess,
                                 estimate_q=estimate_q,
                                 **extra_tensors)
        return policy

    return policy_fn
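
As the docstring above explains, the network builder only produces latent features; the action-dimension layer is added later by the pdfromlatent machinery in baselines.common.distributions. A minimal sketch of that latent stage, assuming the default TF1 mlp (two 64-unit tanh layers); the observation size is illustrative.

import tensorflow as tf
from baselines.common.models import get_network_builder

obs_ph = tf.placeholder(tf.float32, shape=(None, 10))
policy_latent = get_network_builder('mlp')()(obs_ph)  # input --> 64 --> tanh --> 64 --> tanh
# Some baselines versions return a (latent, recurrent_tensors) tuple here instead.
# A pdtype from baselines.common.distributions would then add the final dense layer
# mapping this latent to action-distribution parameters via pdfromlatent(policy_latent).
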
Code example #21
File: models.py Project: gilwoolee/brl_baselines
def build_q_func(network, num_experts, hiddens=[256], dueling=True, layer_norm=False, **network_kwargs):
    assert isinstance(network, str)
    if isinstance(network, str):
        from baselines.common.models import get_network_builder
        # with tf.variable_scope("inp"):
        inp_network = get_network_builder(network)(**network_kwargs)
        # with tf.variable_scope("bel"):
        bel_network = get_network_builder(network)(**network_kwargs)

    def q_func_builder(input_placeholder, belief_placeholder, expert_q_ph, num_actions, scope, reuse=False):
        # input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INP :', summarize=64*48)

        with tf.variable_scope(scope, reuse=reuse):
            # input_placeholder = tf.Print(input_placeholder, [input_placeholder], '>>>> INPUT: ', summarize=100)
            latent_inp = inp_network(input_placeholder)
            if isinstance(latent_inp, tuple):
                if latent_inp[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent_inp = latent_inp[0]

            latent_inp = layers.flatten(latent_inp)

        # belief_placeholder = tf.Print(belief_placeholder, [belief_placeholder], '>>>> BEL :', summarize=64*48)

        with tf.variable_scope(scope, reuse=reuse):

            with tf.variable_scope("bel", reuse=reuse):
                # residual network takes both input and bel
                latent_bel = bel_network(belief_placeholder)
                if isinstance(latent_bel, tuple):
                    if latent_bel[1] is not None:
                        raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                    latent_bel = latent_bel[0]

                latent_bel = layers.flatten(latent_bel)
                stacked = tf.stack([latent_inp, latent_bel], axis=1)
                latent = layers.flatten(stacked)

                with tf.variable_scope("action_value"):
                    action_out = latent
                    for hidden in hiddens:
                        action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                        if layer_norm:
                            action_out = layers.layer_norm(action_out, center=True, scale=True)
                        action_out = tf.nn.relu(action_out)
                    action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

                if dueling:
                    with tf.variable_scope("state_value"):
                        state_out = latent
                        for hidden in hiddens:
                            state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                            if layer_norm:
                                state_out = layers.layer_norm(state_out, center=True, scale=True)
                            state_out = tf.nn.relu(state_out)
                        state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                    action_scores_mean = tf.reduce_mean(action_scores, 1)
                    action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                    q_out = state_score + action_scores_centered
                else:
                    q_out = action_scores

                log_denominator = tf.log(tf.constant(num_experts, dtype=belief_placeholder.dtype))
                entropy = -0.05 * tf.reduce_sum(belief_placeholder * tf.log(belief_placeholder + 1e-5), axis=1) / log_denominator
                entropy = tf.tile(tf.reshape(entropy, [tf.shape(entropy)[0], 1]), [1, num_actions])

                # q_out = tf.Print(q_out, [q_out], '>>>> QOUT :', summarize=3)
                # expert_q_ph = tf.Print(expert_q_ph, [expert_q_ph], '>>>> EXP :', summarize=3)

                q_out = q_out * entropy + (1.0 - entropy) * expert_q_ph

                # with tf.variable_scope("action_value_residual_final"):
                    # q_out = layers.fully_connected(q_out, num_outputs=num_actions, activation_fn=tf.nn.relu)
                    # q_out = layers.fully_connected(q_out, num_outputs=num_actions, activation_fn=None)

            # q_out = tf.Print(q_out, [q_out], '>>>> FOUT :', summarize=3)

            return q_out

    return q_func_builder
Code example #22
File: models.py Project: MrGoogol/baselines
    def __init__(self, name, network='mlp', **network_kwargs):
        self.name = name
        self.network_builder = get_network_builder(network)(**network_kwargs)
Code example #23
    def __init__(self, name, network='mlp', **network_kwargs):
        self.name = name
        self.network = network
        if network != 'cloth_cnn':
            self.network_builder = get_network_builder(network)(**network_kwargs)
Code example #24
File: models.py Project: Klanly/MySharedRepository
    def __init__(self, name, network='mlp', **network_kwargs):
        self.name = name
        self.network_builder = get_network_builder(network)(**network_kwargs)
        self.encoder_builder = get_network_builder("encoder_mlp")(**network_kwargs)
Code example #25
    def __init__(self, name, nenvs, network='mlp', **network_kwargs):
        self.name = name
        self.network_builder = get_network_builder(network)(**network_kwargs)
        self.nenvs = nenvs
Code example #26
File: policies.py Project: ziv-lin/baselines
def build_policy(env,
                 policy_network,
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None):
        ob_space = env.observation_space

        observation_plh = observ_placeholder if observ_placeholder is not None else observation_placeholder(
            ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and observation_plh.dtype == tf.float32:
            ob_plh_normalize_clip, rms = _normalize_clip_observation(
                observation_plh)
            extra_tensors['rms'] = rms
        else:
            ob_plh_normalize_clip = observation_plh

        ob_plh_normalize_clip = encode_observation(ob_space,
                                                   ob_plh_normalize_clip)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent, recurrent_tensors = policy_network(
                ob_plh_normalize_clip)

            if recurrent_tensors is not None:
                # recurrent architecture, need a few more steps
                nenv = nbatch // nsteps
                assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(
                    nbatch, nsteps)
                policy_latent, recurrent_tensors = policy_network(
                    ob_plh_normalize_clip, nenv)
                extra_tensors.update(recurrent_tensors)

        val_net = value_network

        if val_net is None or val_net == 'shared':
            vf_latent = policy_latent
        else:
            if val_net == 'copy':
                val_net = policy_network
            else:
                assert callable(val_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                vf_latent, _ = val_net(ob_plh_normalize_clip)

        policy = PolicyWithValue(env=env,
                                 observations=observation_plh,
                                 policy_latent=policy_latent,
                                 vf_latent=vf_latent,
                                 sess=sess,
                                 estimate_q=estimate_q,
                                 **extra_tensors)
        return policy

    # Return policy = PolicyWithValue(...)
    return policy_fn
Code example #27
File: netrand_policy.py Project: jajajag/mixreg
def build_policy(env,
                 policy_network,
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 **policy_kwargs):
    if isinstance(policy_network, str):
        network_type = policy_network
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None,
                  randomization=True):
        ob_space = env.observation_space

        extra_tensors = {}

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(
            ob_space, batch_size=None)

        encoded_x = encode_observation(ob_space, X)

        # Randomization
        if randomization:
            encoded_x = tf.layers.conv2d(
                encoded_x / 255.,
                3,
                3,
                padding='same',
                kernel_initializer=tf.initializers.glorot_normal(),
                trainable=False,
                name='randcnn') * 255.
            randcnn_param = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                              scope="ppo2_model/randcnn")
            extra_tensors['randcnn_param'] = randcnn_param

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x)
            extra_tensors['latent_fts'] = policy_latent
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(
                        nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(
                        encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)

        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x)

        policy = PolicyWithValue(env=env,
                                 observations=X,
                                 latent=policy_latent,
                                 vf_latent=vf_latent,
                                 sess=sess,
                                 estimate_q=estimate_q,
                                 **extra_tensors)
        return policy

    return policy_fn
Code example #28
File: trpo_mpi.py Project: idthanm/baselines4dsac
def learn(
        *,
        network,
        env,
        eval_env,
        total_timesteps,
        timesteps_per_batch=1024,  # what to train on
        max_kl=0.001,
        cg_iters=10,
        gamma=0.99,
        lam=1.0,  # advantage estimation
        seed=None,
        ent_coef=0.0,
        cg_damping=1e-2,
        vf_stepsize=3e-4,
        vf_iters=3,
        log_path=None,
        max_episodes=0,
        max_iters=0,  # time constraint
        callback=None,
        load_path=None,
        **network_kwargs):
    '''
    Learn a policy function with the TRPO algorithm.

    Parameters:
    ----------

    network                 neural network to learn. Can be either string ('mlp', 'cnn', 'lstm', 'lnlstm' for basic types)
                            or function that takes input placeholder and returns tuple (output, None) for feedforward nets
                            or (output, (state_placeholder, state_output, mask_placeholder)) for recurrent nets

    env                     environment (one of the gym environments or wrapped via a baselines.common.vec_env.VecEnv-type class)

    timesteps_per_batch     timesteps per gradient estimation batch

    max_kl                  max KL divergence between old policy and new policy ( KL(pi_old || pi) )

    ent_coef                coefficient of policy entropy term in the optimization objective

    cg_iters                number of iterations of conjugate gradient algorithm

    cg_damping              conjugate gradient damping

    vf_stepsize             learning rate for adam optimizer used to optimize value function loss

    vf_iters                number of value function optimization iterations per policy optimization step

    total_timesteps           max number of timesteps

    max_episodes            max number of episodes

    max_iters               maximum number of policy optimization iterations

    callback                function to be called with (locals(), globals()) each policy optimization step

    load_path               str, path to load the model from (default: None, i.e. no model is loaded)

    **network_kwargs        keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network

    Returns:
    -------

    learnt model

    '''

    if MPI is not None:
        nworkers = MPI.COMM_WORLD.Get_size()
        rank = MPI.COMM_WORLD.Get_rank()
    else:
        nworkers = 1
        rank = 0

    set_global_seeds(seed)

    np.set_printoptions(precision=3)
    # Setup losses and stuff
    # ----------------------------------------
    ob_space = env.observation_space
    ac_space = env.action_space

    if isinstance(network, str):
        network = get_network_builder(network)(**network_kwargs)

    with tf.name_scope("pi"):
        pi_policy_network = network(ob_space.shape)
        pi_value_network = network(ob_space.shape)
        pi = PolicyWithValue(ac_space, pi_policy_network, pi_value_network)
    with tf.name_scope("oldpi"):
        old_pi_policy_network = network(ob_space.shape)
        old_pi_value_network = network(ob_space.shape)
        oldpi = PolicyWithValue(ac_space, old_pi_policy_network,
                                old_pi_value_network)

    pi_var_list = pi_policy_network.trainable_variables + list(
        pi.pdtype.trainable_variables)
    old_pi_var_list = old_pi_policy_network.trainable_variables + list(
        oldpi.pdtype.trainable_variables)
    vf_var_list = pi_value_network.trainable_variables + pi.value_fc.trainable_variables
    old_vf_var_list = old_pi_value_network.trainable_variables + oldpi.value_fc.trainable_variables

    if load_path is not None:
        load_path = osp.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=pi)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        ckpt.restore(manager.latest_checkpoint)

    vfadam = MpiAdam(vf_var_list)

    get_flat = U.GetFlat(pi_var_list)
    set_from_flat = U.SetFromFlat(pi_var_list)
    loss_names = ["optimgain", "meankl", "entloss", "surrgain", "entropy"]
    shapes = [var.get_shape().as_list() for var in pi_var_list]

    def assign_old_eq_new():
        for pi_var, old_pi_var in zip(pi_var_list, old_pi_var_list):
            old_pi_var.assign(pi_var)
        for vf_var, old_vf_var in zip(vf_var_list, old_vf_var_list):
            old_vf_var.assign(vf_var)

    @tf.function
    def compute_lossandgrad(ob, ac, atarg):
        with tf.GradientTape() as tape:
            old_policy_latent = oldpi.policy_network(ob)
            old_pd, _ = oldpi.pdtype.pdfromlatent(old_policy_latent)
            policy_latent = pi.policy_network(ob)
            pd, _ = pi.pdtype.pdfromlatent(policy_latent)
            kloldnew = old_pd.kl(pd)
            ent = pd.entropy()
            meankl = tf.reduce_mean(kloldnew)
            meanent = tf.reduce_mean(ent)
            entbonus = ent_coef * meanent
            ratio = tf.exp(pd.logp(ac) - old_pd.logp(ac))
            surrgain = tf.reduce_mean(ratio * atarg)
            optimgain = surrgain + entbonus
            losses = [optimgain, meankl, entbonus, surrgain, meanent]
        gradients = tape.gradient(optimgain, pi_var_list)
        return losses + [U.flatgrad(gradients, pi_var_list)]

    @tf.function
    def compute_losses(ob, ac, atarg):
        old_policy_latent = oldpi.policy_network(ob)
        old_pd, _ = oldpi.pdtype.pdfromlatent(old_policy_latent)
        policy_latent = pi.policy_network(ob)
        pd, _ = pi.pdtype.pdfromlatent(policy_latent)
        kloldnew = old_pd.kl(pd)
        ent = pd.entropy()
        meankl = tf.reduce_mean(kloldnew)
        meanent = tf.reduce_mean(ent)
        entbonus = ent_coef * meanent
        ratio = tf.exp(pd.logp(ac) - old_pd.logp(ac))
        surrgain = tf.reduce_mean(ratio * atarg)
        optimgain = surrgain + entbonus
        losses = [optimgain, meankl, entbonus, surrgain, meanent]
        return losses

    #ob shape should be [batch_size, ob_dim], merged nenv
    #ret shape should be [batch_size]
    @tf.function
    def compute_vflossandgrad(ob, ret):
        with tf.GradientTape() as tape:
            pi_vf = pi.value(ob)
            vferr = tf.reduce_mean(tf.square(pi_vf - ret))
        return U.flatgrad(tape.gradient(vferr, vf_var_list), vf_var_list)

    @tf.function
    def compute_fvp(flat_tangent, ob, ac, atarg):
        with tf.GradientTape() as outer_tape:
            with tf.GradientTape() as inner_tape:
                old_policy_latent = oldpi.policy_network(ob)
                old_pd, _ = oldpi.pdtype.pdfromlatent(old_policy_latent)
                policy_latent = pi.policy_network(ob)
                pd, _ = pi.pdtype.pdfromlatent(policy_latent)
                kloldnew = old_pd.kl(pd)
                meankl = tf.reduce_mean(kloldnew)
            klgrads = inner_tape.gradient(meankl, pi_var_list)
            start = 0
            tangents = []
            for shape in shapes:
                sz = U.intprod(shape)
                tangents.append(
                    tf.reshape(flat_tangent[start:start + sz], shape))
                start += sz
            gvp = tf.add_n([
                tf.reduce_sum(g * tangent)
                for (g, tangent) in zipsame(klgrads, tangents)
            ])
        hessians_products = outer_tape.gradient(gvp, pi_var_list)
        fvp = U.flatgrad(hessians_products, pi_var_list)
        return fvp

    @contextmanager
    def timed(msg):
        if rank == 0:
            print(colorize(msg, color='magenta'))
            tstart = time.time()
            yield
            print(
                colorize("done in %.3f seconds" % (time.time() - tstart),
                         color='magenta'))
        else:
            yield

    def allmean(x):
        assert isinstance(x, np.ndarray)
        if MPI is not None:
            out = np.empty_like(x)
            MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)
            out /= nworkers
        else:
            out = np.copy(x)

        return out

    th_init = get_flat()
    if MPI is not None:
        MPI.COMM_WORLD.Bcast(th_init, root=0)

    set_from_flat(th_init)
    vfadam.sync()
    print("Init param sum", th_init.sum(), flush=True)

    # Prepare for rollouts
    # ----------------------------------------
    seg_gen = traj_segment_generator(pi, env, timesteps_per_batch)

    episodes_so_far = 0
    timesteps_so_far = 0
    iters_so_far = 0
    tstart = time.time()
    lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
    rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards

    logdir = log_path + '/evaluator'
    modeldir = log_path + '/models'
    if not os.path.exists(logdir):
        os.makedirs(logdir)
    if not os.path.exists(modeldir):
        os.makedirs(modeldir)
    evaluator = Evaluator(env=eval_env, model=pi, logdir=logdir)
    max_inner_iter = 500000 if env.spec.id == 'InvertedDoublePendulum-v2' else 3000000
    epoch = vf_iters
    batch_size = timesteps_per_batch
    mb_size = 256
    inner_iter_per_iter = epoch * int(batch_size / mb_size)
    max_iter = int(max_inner_iter / inner_iter_per_iter)
    eval_num = 150
    eval_interval = save_interval = int(
        int(max_inner_iter / eval_num) / inner_iter_per_iter)

    if sum([max_iters > 0, total_timesteps > 0, max_episodes > 0]) == 0:
        # nothing to be done
        return pi

    assert sum([max_iters>0, total_timesteps>0, max_episodes>0]) < 2, \
        'out of max_iters, total_timesteps, and max_episodes only one should be specified'

    for update in range(1, max_iter + 1):
        if callback: callback(locals(), globals())
        # if total_timesteps and timesteps_so_far >= total_timesteps:
        #     break
        # elif max_episodes and episodes_so_far >= max_episodes:
        #     break
        # elif max_iters and iters_so_far >= max_iters:
        #     break
        logger.log("********** Iteration %i ************" % iters_so_far)
        if (update - 1) % eval_interval == 0:
            evaluator.run_evaluation(update - 1)
        if (update - 1) % save_interval == 0:
            ckpt = tf.train.Checkpoint(model=pi)
            ckpt.save(modeldir + '/ckpt_ite' + str((update - 1)))

        with timed("sampling"):
            seg = seg_gen.__next__()
        add_vtarg_and_adv(seg, gamma, lam)

        # ob, ac, atarg, ret, td1ret = map(np.concatenate, (obs, acs, atargs, rets, td1rets))
        ob, ac, atarg, tdlamret = seg["ob"], seg["ac"], seg["adv"], seg[
            "tdlamret"]
        ob = sf01(ob)
        vpredbefore = seg["vpred"]  # predicted value function before udpate
        atarg = (atarg - atarg.mean()
                 ) / atarg.std()  # standardized advantage function estimate

        if hasattr(pi, "ret_rms"): pi.ret_rms.update(tdlamret)
        if hasattr(pi, "ob_rms"):
            pi.ob_rms.update(ob)  # update running mean/std for policy

        args = ob, ac, atarg
        fvpargs = [arr[::5] for arr in args]

        def fisher_vector_product(p):
            return allmean(compute_fvp(p, *fvpargs).numpy()) + cg_damping * p

        assign_old_eq_new()  # set old parameter values to new parameter values
        with timed("computegrad"):
            *lossbefore, g = compute_lossandgrad(*args)
        lossbefore = allmean(np.array(lossbefore))
        g = g.numpy()
        g = allmean(g)
        if np.allclose(g, 0):
            logger.log("Got zero gradient. not updating")
        else:
            with timed("cg"):
                stepdir = cg(fisher_vector_product,
                             g,
                             cg_iters=cg_iters,
                             verbose=rank == 0)
            assert np.isfinite(stepdir).all()
            shs = .5 * stepdir.dot(fisher_vector_product(stepdir))
            lm = np.sqrt(shs / max_kl)
            # logger.log("lagrange multiplier:", lm, "gnorm:", np.linalg.norm(g))
            fullstep = stepdir / lm
            expectedimprove = g.dot(fullstep)
            surrbefore = lossbefore[0]
            stepsize = 1.0
            thbefore = get_flat()
            for _ in range(10):
                thnew = thbefore + fullstep * stepsize
                set_from_flat(thnew)
                meanlosses = surr, kl, *_ = allmean(
                    np.array(compute_losses(*args)))
                improve = surr - surrbefore
                logger.log("Expected: %.3f Actual: %.3f" %
                           (expectedimprove, improve))
                if not np.isfinite(meanlosses).all():
                    logger.log("Got non-finite value of losses -- bad!")
                elif kl > max_kl * 1.5:
                    logger.log("violated KL constraint. shrinking step.")
                elif improve < 0:
                    logger.log("surrogate didn't improve. shrinking step.")
                else:
                    logger.log("Stepsize OK!")
                    break
                stepsize *= .5
            else:
                logger.log("couldn't compute a good step")
                set_from_flat(thbefore)
            if nworkers > 1 and iters_so_far % 20 == 0:
                paramsums = MPI.COMM_WORLD.allgather(
                    (thnew.sum(), vfadam.getflat().sum()))  # list of tuples
                assert all(
                    np.allclose(ps, paramsums[0]) for ps in paramsums[1:])

        for (lossname, lossval) in zip(loss_names, meanlosses):
            logger.record_tabular(lossname, lossval)

        with timed("vf"):

            for _ in range(vf_iters):
                for (mbob, mbret) in dataset.iterbatches(
                    (seg["ob"], seg["tdlamret"]),
                        include_final_partial_batch=False,
                        batch_size=mb_size):
                    mbob = sf01(mbob)
                    g = allmean(compute_vflossandgrad(mbob, mbret).numpy())
                    vfadam.update(g, vf_stepsize)

        logger.record_tabular("ev_tdlam_before",
                              explained_variance(vpredbefore, tdlamret))

        lrlocal = (seg["ep_lens"], seg["ep_rets"])  # local values
        if MPI is not None:
            listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal)  # list of tuples
        else:
            listoflrpairs = [lrlocal]

        lens, rews = map(flatten_lists, zip(*listoflrpairs))
        lenbuffer.extend(lens)
        rewbuffer.extend(rews)

        logger.record_tabular("EpLenMean", np.mean(lenbuffer))
        logger.record_tabular("EpRewMean", np.mean(rewbuffer))
        logger.record_tabular("EpThisIter", len(lens))
        episodes_so_far += len(lens)
        timesteps_so_far += sum(lens)
        iters_so_far += 1

        logger.record_tabular("EpisodesSoFar", episodes_so_far)
        logger.record_tabular("TimestepsSoFar", timesteps_so_far)
        logger.record_tabular("TimeElapsed", time.time() - tstart)

        if rank == 0:
            logger.dump_tabular()

    return pi
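The update loop above is the classic TRPO step, but it is easy to lose among the MPI and logging code: conjugate gradient turns the policy gradient into a search direction using only Fisher-vector products, the direction is rescaled onto the KL trust-region boundary, and a backtracking line search halves the step until the surrogate objective improves without breaking the KL limit. Below is a minimal, self-contained numpy sketch of that step on a toy quadratic surrogate; the toy objective, the stand-in Fisher matrix A, and every name in the sketch are illustrative assumptions, not code from the example above.

import numpy as np

def conjugate_gradient(fvp, g, iters=10, tol=1e-10):
    # Solve H x = g with plain CG, touching H only through Fisher-vector products.
    x = np.zeros_like(g)
    r = g.copy()
    p = g.copy()
    rdotr = r.dot(r)
    for _ in range(iters):
        hp = fvp(p)
        alpha = rdotr / p.dot(hp)
        x += alpha * p
        r -= alpha * hp
        new_rdotr = r.dot(r)
        if new_rdotr < tol:
            break
        p = r + (new_rdotr / rdotr) * p
        rdotr = new_rdotr
    return x

def trust_region_step(theta, grad, fvp, surrogate, max_kl=0.01):
    stepdir = conjugate_gradient(fvp, grad)
    shs = 0.5 * stepdir.dot(fvp(stepdir))        # 0.5 * s^T H s
    fullstep = stepdir / np.sqrt(shs / max_kl)   # rescaled onto the KL boundary
    before = surrogate(theta)
    stepsize = 1.0
    for _ in range(10):                          # backtracking line search
        candidate = theta + stepsize * fullstep
        diff = candidate - theta
        kl = 0.5 * diff.dot(fvp(diff))           # quadratic KL approximation
        improve = surrogate(candidate) - before
        if np.isfinite(improve) and improve > 0 and kl <= 1.5 * max_kl:
            return candidate
        stepsize *= 0.5
    return theta                                 # no acceptable step found

# Toy problem: maximize -0.5 * x^T A x + b^T x, with A standing in for the Fisher matrix.
A = np.array([[2.0, 0.3], [0.3, 1.0]])
b = np.array([1.0, -0.5])
surrogate = lambda x: -0.5 * x.dot(A).dot(x) + b.dot(x)
fvp = lambda v: A.dot(v)

theta = np.zeros(2)
theta = trust_region_step(theta, b - A.dot(theta), fvp, surrogate)
print(theta)   # a small step toward the optimum, within the KL budget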
Code Example #29
0
def build_policy(env,
                 policy_network,
                 value_network=None,
                 normalize_observations=False,
                 estimate_q=False,
                 **policy_kwargs):
    todropoutpi = policy_kwargs['dropoutpi'] < 1.0
    todropoutvf = policy_kwargs['dropoutvf'] < 1.0
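    # dropoutpi / dropoutvf are keep probabilities: a value below 1.0 turns dropout on for the
    # corresponding network and makes the builder return the extra keep-prob placeholders.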
    if isinstance(policy_network, str):
        network_type = policy_network
    else:
        network_type = 'mlp'

    if todropoutpi or todropoutvf:
        print("Dropout: policy = {}, value = {}".format(
            policy_kwargs['dropoutpi'], policy_kwargs['dropoutvf']))
        policy_network, dropoutpi_keep_prob, dropoutvf_keep_prob = get_network_builder(
            network_type)(**policy_kwargs)
    else:
        policy_network = get_network_builder(network_type)(**policy_kwargs)

    batchnormpi = policy_kwargs["batchnormpi"]
    batchnormvf = policy_kwargs["batchnormvf"]
    if batchnormpi and batchnormvf:
        policy_network, isbnpitrainmode, isbnvftrainmode = policy_network
    elif batchnormpi and not batchnormvf:
        policy_network, isbnpitrainmode = policy_network
    elif batchnormvf and not batchnormpi:
        policy_network, isbnvftrainmode = policy_network
    if batchnormpi:
        print("Batchnorm: Policy network")
    if batchnormvf:
        print("Batchnorm: Value network")

    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None):
        ob_space = env.observation_space

        X = observ_placeholder if observ_placeholder is not None else observation_placeholder(
            ob_space, batch_size=nbatch)

        extra_tensors = {}

        if normalize_observations and X.dtype == tf.float32:
            encoded_x, rms = _normalize_clip_observation(X)
            extra_tensors['rms'] = rms
        else:
            encoded_x = X

        encoded_x = encode_observation(ob_space, encoded_x)

        with tf.variable_scope('pi', reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(encoded_x, mode="pi")
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(
                        nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(
                        encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)

        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf', reuse=tf.AUTO_REUSE):
                # TODO recurrent architectures are not supported with value_network=copy yet
                vf_latent = _v_net(encoded_x, mode="vf")
        if todropoutpi:
            extra_tensors.update({"dropoutpi_keep_prob": dropoutpi_keep_prob})
        if todropoutvf:
            extra_tensors.update({"dropoutvf_keep_prob": dropoutvf_keep_prob})
        if batchnormpi:
            extra_tensors.update({"isbnpitrainmode": isbnpitrainmode})
        if batchnormvf:
            extra_tensors.update({"isbnvftrainmode": isbnvftrainmode})

        policy = PolicyWithValue(env=env,
                                 observations=X,
                                 latent=policy_latent,
                                 vf_latent=vf_latent,
                                 sess=sess,
                                 estimate_q=estimate_q,
                                 **extra_tensors)
        return policy

    ret = policy_fn
    if batchnormpi or batchnormvf:
        ret = (ret, )
        if batchnormpi:
            ret = ret + (isbnpitrainmode, )
        if batchnormvf:
            ret = ret + (isbnvftrainmode, )

    if todropoutpi or todropoutvf:
        return ret, dropoutpi_keep_prob, dropoutvf_keep_prob
    else:
        return ret
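Because this build_policy changes its return shape depending on which regularizers are switched on, call sites have to unpack it to match: the bare policy_fn when neither dropout nor batch norm is used, a tuple carrying the batch-norm train-mode tensors when batch norm is on, and the two dropout keep-probability placeholders appended when dropout is on. The sketch below shows the two most common shapes; env and the batch sizes are placeholders, and it assumes the project's custom 'mlp' builder accepts the dropout/batch-norm keyword arguments and returns the extra tensors exactly as the unpacking code above expects.

# No dropout (keep probabilities of 1.0) and no batch norm: just the policy_fn.
policy_fn = build_policy(env, 'mlp',
                         dropoutpi=1.0, dropoutvf=1.0,
                         batchnormpi=False, batchnormvf=False)
pi = policy_fn(nbatch=2048, nsteps=2048)

# Dropout on both networks and batch norm on the policy network only:
# the first element is (policy_fn, isbnpitrainmode), followed by the two
# dropout keep-probability placeholders.
(policy_fn, isbnpitrainmode), dropoutpi_keep_prob, dropoutvf_keep_prob = build_policy(
    env, 'mlp',
    dropoutpi=0.9, dropoutvf=0.9,
    batchnormpi=True, batchnormvf=False)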
Code Example #30
0
def learn(*,
          network,
          env,
          total_timesteps,
          eval_env=None,
          seed=None,
          nsteps=2048,
          ent_coef=0.0,
          lr=3e-4,
          vf_coef=0.5,
          max_grad_norm=0.5,
          gamma=0.99,
          lam=0.95,
          log_interval=10,
          nminibatches=4,
          noptepochs=4,
          cliprange=0.2,
          save_interval=0,
          load_path=None,
          model_fn=None,
          **network_kwargs):
    '''
    Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)

    Parameters:
    ----------

    network:                          policy network architecture. Either string (mlp, lstm, lnlstm, cnn_lstm, cnn, cnn_small, conv_only - see baselines.common/models.py for full list)
                                      specifying the standard network architecture, or a function that takes tensorflow tensor as input and returns
                                      tuple (output_tensor, extra_feed) where output tensor is the last network layer output, extra_feed is None for feed-forward
                                      neural nets, and extra_feed is a dictionary describing how to feed state into the network for recurrent neural nets.
                                      See common/models.py/lstm for more details on using recurrent nets in policies

    env: baselines.common.vec_env.VecEnv     environment. Needs to be vectorized for parallel environment simulation.
                                      The environments produced by gym.make can be wrapped using baselines.common.vec_env.DummyVecEnv class.


    nsteps: int                       number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
                                      nenv is number of environment copies simulated in parallel)

    total_timesteps: int              number of timesteps (i.e. number of actions taken in the environment)

    ent_coef: float                   policy entropy coefficient in the optimization objective

    lr: float or function             learning rate, constant or a schedule function [0,1] -> R+ where 1 is beginning of the
                                      training and 0 is the end of the training.

    vf_coef: float                    value function loss coefficient in the optimization objective

    max_grad_norm: float or None      gradient norm clipping coefficient

    gamma: float                      discounting factor

    lam: float                        advantage estimation discounting factor (lambda in the paper)

    log_interval: int                 number of updates between logging events

    nminibatches: int                 number of training minibatches per update. For recurrent policies,
                                      should be less than or equal to the number of environments run in parallel.

    noptepochs: int                   number of training epochs per update

    cliprange: float or function      clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
                                      and 0 is the end of the training

    save_interval: int                number of updates between saving events

    load_path: str                    path to load the model from

    **network_kwargs:                 keyword arguments to the policy / network builder. See baselines.common/policies.py/build_policy and arguments to a particular type of network
                                      For instance, 'mlp' network architecture has arguments num_hidden and num_layers.



    '''

    set_global_seeds(seed)

    total_timesteps = int(total_timesteps)

    # Get the number of environments
    nenvs = env.num_envs

    # Get state_space and action_space
    ob_space = env.observation_space
    ac_space = env.action_space

    if isinstance(network, str):
        network_type = network
        policy_network_fn = get_network_builder(network_type)(**network_kwargs)
        policy_network = policy_network_fn(ob_space.shape)
    else:
        raise ValueError('network is expected to be the name of a registered network builder')

    # Calculate the batch_size
    nbatch = nenvs * nsteps
    nbatch_train = nbatch // nminibatches

    # Instantiate the model object (that creates act_model and train_model)
    if model_fn is None:
        from baselines.ppo2.model import Model
        model_fn = Model

    model = model_fn(ac_space=ac_space,
                     policy_network=policy_network,
                     ent_coef=ent_coef,
                     vf_coef=vf_coef,
                     max_grad_norm=max_grad_norm,
                     lr=lr)
    if load_path is not None:
        load_path = osp.expanduser(load_path)
        ckpt = tf.train.Checkpoint(model=model)
        manager = tf.train.CheckpointManager(ckpt, load_path, max_to_keep=None)
        ckpt.restore(manager.latest_checkpoint)
        print("Restoring from {}".format(manager.latest_checkpoint))
        print('after restore, all trainable weights {}'.format(
            model.train_model.policy_network.trainable_weights))
        #model.load_weights(load_path)

    # Instantiate the runner object
    runner = Runner(env=env, model=model, nsteps=nsteps, gamma=gamma, lam=lam)
    if eval_env is not None:
        eval_runner = Runner(env=eval_env,
                             model=model,
                             nsteps=nsteps,
                             gamma=gamma,
                             lam=lam)

    epinfobuf = deque(maxlen=100)
    if eval_env is not None:
        eval_epinfobuf = deque(maxlen=100)

    # Start total timer
    tfirststart = time.perf_counter()

    nupdates = total_timesteps // nbatch
    for update in range(1, nupdates + 1):
        assert nbatch % nminibatches == 0
        # Start timer
        tstart = time.perf_counter()
        frac = 1.0 - (update - 1.0) / nupdates
        # Calculate the learning rate
        #lrnow = lr(frac)
        lrnow = lr
        # Get minibatch
        obs, returns, masks, actions, values, neglogpacs, states, epinfos = runner.run(
        )  #pylint: disable=E0632
        if eval_env is not None:
            eval_obs, eval_returns, eval_masks, eval_actions, eval_values, eval_neglogpacs, eval_states, eval_epinfos = eval_runner.run(
            )  #pylint: disable=E0632

        epinfobuf.extend(epinfos)
        if eval_env is not None:
            eval_epinfobuf.extend(eval_epinfos)

        # For each minibatch, calculate the losses and append them to mblossvals.
        mblossvals = []
        if states is None:  # nonrecurrent version
            # Index of each element of batch_size
            # Create the indices array
            inds = np.arange(nbatch)
            for _ in range(noptepochs):
                # Randomize the indexes
                np.random.shuffle(inds)
                # 0 to batch_size with batch_train_size step
                for start in range(0, nbatch, nbatch_train):
                    end = start + nbatch_train
                    mbinds = inds[start:end]
                    slices = (tf.constant(arr[mbinds])
                              for arr in (obs, returns, masks, actions, values,
                                          neglogpacs))
                    # (the original kept a commented-out variant here that recomputed and
                    #  normalized advantages per minibatch and printed each slice for debugging)
                    pg_loss, vf_loss, entropy, approxkl, clipfrac, vpred, vpredclipped = model.train(
                        lrnow, cliprange, *slices)
                    # (the original also kept a commented-out keyword-argument form of
                    #  model.train here, with debug prints of each loss term and parameter)
                    mblossvals.append([
                        pg_loss.numpy(),
                        vf_loss.numpy(),
                        entropy.numpy(),
                        approxkl.numpy(),
                        clipfrac.numpy()
                    ])
                    # mblossvals.append([output for output.numpy() in model.train(cliprange, *slices)])
        else:  # recurrent version
            raise ValueError('Recurrent policies are not supported yet')

        # Feedforward --> get losses --> update
        lossvals = np.mean(mblossvals, axis=0)
        # End timer
        tnow = time.perf_counter()
        # Calculate the fps (frames per second)
        fps = int(nbatch / (tnow - tstart))
        if update % log_interval == 0 or update == 1:
            # Calculates whether the value function is a good predictor of the returns (ev close to 1)
            # or whether it's just worse than predicting nothing (ev <= 0)
            ev = explained_variance(values, returns)
            logger.logkv("serial_timesteps", update * nsteps)
            logger.logkv("nupdates", update)
            logger.logkv("total_timesteps", update * nbatch)
            logger.logkv("fps", fps)
            logger.logkv("explained_variance", float(ev))
            logger.logkv('eprewmean',
                         safemean([epinfo['r'] for epinfo in epinfobuf]))
            logger.logkv('eplenmean',
                         safemean([epinfo['l'] for epinfo in epinfobuf]))
            if eval_env is not None:
                logger.logkv(
                    'eval_eprewmean',
                    safemean([epinfo['r'] for epinfo in eval_epinfobuf]))
                logger.logkv(
                    'eval_eplenmean',
                    safemean([epinfo['l'] for epinfo in eval_epinfobuf]))
            logger.logkv('time_elapsed', tnow - tfirststart)
            for (lossval, lossname) in zip(lossvals, model.loss_names):
                logger.logkv(lossname, lossval)
            if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:
                logger.dumpkvs()
    return model
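All of the actual optimization in the loop above happens inside model.train, which lives in baselines.ppo2.model and is not shown here. As a reference for what such an update minimizes, the following is the standard PPO clipped objective written out in plain numpy; the function name and its arguments are illustrative, not the Model class's real interface.

import numpy as np

def ppo_loss(neglogpac_new, neglogpac_old, advs, vpred, vpred_old, returns,
             entropy, cliprange=0.2, ent_coef=0.0, vf_coef=0.5):
    # Probability ratio pi_new / pi_old, recovered from negative log-probabilities.
    ratio = np.exp(neglogpac_old - neglogpac_new)
    # Clipped policy (surrogate) loss.
    pg_loss1 = -advs * ratio
    pg_loss2 = -advs * np.clip(ratio, 1.0 - cliprange, 1.0 + cliprange)
    pg_loss = np.mean(np.maximum(pg_loss1, pg_loss2))
    # Value loss, clipped around the old value prediction in the same way.
    vpred_clipped = vpred_old + np.clip(vpred - vpred_old, -cliprange, cliprange)
    vf_loss = 0.5 * np.mean(np.maximum((vpred - returns) ** 2,
                                       (vpred_clipped - returns) ** 2))
    # Entropy bonus encourages exploration.
    return pg_loss - ent_coef * np.mean(entropy) + vf_coef * vf_loss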
Code Example #31
0
File: policies.py  Project: AndyLc/baselines
    def policy_fn(nbatch=None,
                  nsteps=None,
                  sess=None,
                  observ_placeholder=None,
                  encoded_x=None):
        ob_space = env.observation_space
        extra_tensors = {}

        if observ_placeholder is None:
            X = observation_placeholder(ob_space, batch_size=nbatch)
            if normalize_observations and X.dtype == tf.float32:
                new_encoded_x, rms = _normalize_clip_observation(X)
                extra_tensors['rms'] = rms
            else:
                new_encoded_x = X

            new_encoded_x = encode_observation(ob_space, new_encoded_x)
            new_encoded_x = get_network_builder("cnn")(
                **policy_kwargs)(new_encoded_x)
        else:
            X = observ_placeholder
            new_encoded_x = encoded_x

        with tf.variable_scope('pi' + str(head), reuse=tf.AUTO_REUSE):
            policy_latent = policy_network(new_encoded_x)
            if isinstance(policy_latent, tuple):
                policy_latent, recurrent_tensors = policy_latent

                if recurrent_tensors is not None:
                    # recurrent architecture, need a few more steps
                    nenv = nbatch // nsteps
                    assert nenv > 0, 'Bad input for recurrent policy: batch size {} smaller than nsteps {}'.format(
                        nbatch, nsteps)
                    policy_latent, recurrent_tensors = policy_network(
                        new_encoded_x, nenv)
                    extra_tensors.update(recurrent_tensors)

        _v_net = value_network

        if _v_net is None or _v_net == 'shared':
            vf_latent = policy_latent
        else:
            if _v_net == 'copy':
                _v_net = policy_network
            else:
                assert callable(_v_net)

            with tf.variable_scope('vf' + str(head), reuse=tf.AUTO_REUSE):
                vf_latent, _ = _v_net(new_encoded_x)

        policy = PolicyWithValue(
            env=env,
            observations=X,
            latent=policy_latent,
            head=head,
            vf_latent=vf_latent,  #this is the same as policy_latent...
            sess=sess,
            estimate_q=estimate_q,
            **extra_tensors)

        #print(policy.vf)

        return policy, X, new_encoded_x
Code Example #32
0
def build_q_func(network,
                 num_experts,
                 hiddens=[24],
                 dueling=False,
                 layer_norm=False,
                 **network_kwargs):
    assert isinstance(network, str), 'network is expected to be the name of a registered network builder'
    from baselines.common.models import get_network_builder
    # with tf.variable_scope("inp"):
    inp_network = get_network_builder(network)(**network_kwargs)
    # with tf.variable_scope("bel"):
    # bel_network is only referenced by the commented-out variant noted below.
    bel_network = get_network_builder(network)(**network_kwargs)

    # (the original source kept a roughly 130-line commented-out variant of q_func_builder here:
    #  it built one action-value head per expert, belief-weighted them into a mean Q-value, and
    #  mixed that with a residual belief network through an entropy-based gate, returning
    #  (qmean, stacked_q_values))

    def q_func_builder(input_placeholder,
                       belief_placeholder,
                       num_actions,
                       scope,
                       reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            print("Scope", scope, reuse, input_placeholder)
            latent = inp_network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError(
                        "DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                # for hidden in hiddens:
                #     action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                #     if layer_norm:
                #         action_out = layers.layer_norm(action_out, center=True, scale=True)
                #     action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out,
                                                       num_outputs=num_actions,
                                                       activation_fn=None)

            if dueling:
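                # Dueling head: Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)); the advantage
                # stream is centered before being added to the scalar state value.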
                with tf.variable_scope("state_value"):
                    state_out = latent
                    # for hidden in hiddens:
                    #     state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    #     if layer_norm:
                    #         state_out = layers.layer_norm(state_out, center=True, scale=True)
                    # state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out,
                                                         num_outputs=1,
                                                         activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(
                    action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            # import IPython; IPython.embed(); import sys; sys.exit(0)
            return q_out, tf.expand_dims(q_out, -1)

    return q_func_builder
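The live q_func_builder above ends with the standard dueling aggregation, Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). For reference, here is that aggregation on a small made-up batch in plain numpy; the array values are purely illustrative.

import numpy as np

state_score = np.array([[1.0], [0.5]])          # V(s), shape (batch, 1)
action_scores = np.array([[2.0, 0.0, 1.0],      # A(s, a), shape (batch, num_actions)
                          [0.0, 1.0, -1.0]])

centered = action_scores - action_scores.mean(axis=1, keepdims=True)
q_out = state_score + centered                  # Q(s, a) = V(s) + (A - mean A)
print(q_out)
# [[ 2.   0.   1. ]
#  [ 0.5  1.5 -0.5]]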
Code Example #33
0
    def __init__(self, agent, network, nsteps, rho, max_kl, ent_coef,
                 vf_stepsize, vf_iters, cg_damping, cg_iters, seed, load_path,
                 **network_kwargs):
        super(AgentModel, self).__init__(name='MATRPOModel')
        self.agent = agent
        self.nsteps = nsteps
        self.rho = rho
        self.max_kl = max_kl
        self.ent_coef = ent_coef
        self.cg_damping = cg_damping
        self.cg_iters = cg_iters
        self.vf_stepsize = vf_stepsize
        self.vf_iters = vf_iters

        set_global_seeds(seed)

        np.set_printoptions(precision=3)

        if MPI is not None:
            self.nworkers = MPI.COMM_WORLD.Get_size()
            self.rank = MPI.COMM_WORLD.Get_rank()
        else:
            self.nworkers = 1
            self.rank = 0

        # Setup losses and stuff
        # ----------------------------------------
        ob_space = agent.observation_space
        ac_space = agent.action_space

        if isinstance(network, str):
            network = get_network_builder(network)(**network_kwargs)

        with tf.name_scope(agent.name):
            with tf.name_scope("pi"):
                pi_policy_network = network(ob_space.shape)
                pi_value_network = network(ob_space.shape)
                self.pi = pi = PolicyWithValue(ac_space, pi_policy_network,
                                               pi_value_network)
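            # "oldpi" is a second copy of the policy held at the pre-update parameters;
            # TRPO-style updates measure the KL constraint and the surrogate ratio against it.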
            with tf.name_scope("oldpi"):
                old_pi_policy_network = network(ob_space.shape)
                old_pi_value_network = network(ob_space.shape)
                self.oldpi = oldpi = PolicyWithValue(ac_space,
                                                     old_pi_policy_network,
                                                     old_pi_value_network)

        self.comm_matrix = agent.comm_matrix.copy()
        self.estimates = np.ones([agent.nmates, nsteps], dtype=np.float32)
        self.multipliers = np.zeros([self.agent.nmates,
                                     self.nsteps]).astype(np.float32)
        for i, comm_i in enumerate(self.comm_matrix):
            self.estimates[i] = comm_i[self.agent.id] * self.estimates[i]

        pi_var_list = pi_policy_network.trainable_variables + list(
            pi.pdtype.trainable_variables)
        old_pi_var_list = old_pi_policy_network.trainable_variables + list(
            oldpi.pdtype.trainable_variables)
        vf_var_list = pi_value_network.trainable_variables + pi.value_fc.trainable_variables
        old_vf_var_list = old_pi_value_network.trainable_variables + oldpi.value_fc.trainable_variables

        self.pi_var_list = pi_var_list
        self.old_pi_var_list = old_pi_var_list
        self.vf_var_list = vf_var_list
        self.old_vf_var_list = old_vf_var_list

        if load_path is not None:
            load_path = osp.expanduser(load_path)
            ckpt = tf.train.Checkpoint(model=pi)
            manager = tf.train.CheckpointManager(ckpt,
                                                 load_path,
                                                 max_to_keep=None)
            ckpt.restore(manager.latest_checkpoint)

        self.vfadam = MpiAdam(vf_var_list)

        self.get_flat = U.GetFlat(pi_var_list)
        self.set_from_flat = U.SetFromFlat(pi_var_list)
        self.loss_names = [
            "Lagrange", "surrgain", "sync", "meankl", "entloss", "entropy"
        ]
        self.shapes = [var.get_shape().as_list() for var in pi_var_list]