def dist_info_sym(self, obs_var, state_info_vars):
        n_batches = tf.shape(obs_var)[0]
        n_steps = tf.shape(obs_var)[1]
        obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))

        if state_info_vars is not None and len(state_info_vars.keys()) != 0:
            obs_var = self.latent_sampler.merge_sym(obs_var, state_info_vars)

        if self.state_include_action:
            prev_action_var = state_info_vars["prev_action"]
            all_input_var = tf.concat(axis=2,
                                      values=[obs_var, prev_action_var])
        else:
            all_input_var = obs_var

        if self.feature_network is None:
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std],
                {self.l_input: all_input_var})
        else:
            flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
            means, log_stds = L.get_output(
                [self.mean_network.output_layer, self.l_log_std], {
                    self.l_input: all_input_var,
                    self.feature_network.input_layer: flat_input_var
                })
        return dict(mean=means, log_std=log_stds)
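The mean/log_std pair returned here is what the recurrent diagonal-Gaussian distribution consumes downstream, for instance to score sampled actions. A minimal TF 1.x sketch of that log-likelihood computation (illustrative only; the helper name is not part of the listing):

import numpy as np
import tensorflow as tf

def diag_gaussian_log_likelihood(actions, mean, log_std):
    # log N(a; mean, diag(exp(log_std))^2), summed over the action dimension
    zs = (actions - mean) / tf.exp(log_std)
    dim = tf.cast(tf.shape(actions)[-1], tf.float32)
    return (-tf.reduce_sum(log_std, axis=-1)
            - 0.5 * tf.reduce_sum(tf.square(zs), axis=-1)
            - 0.5 * dim * np.log(2.0 * np.pi))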
Example #2
 def dist_info_sym(self, obs_var, state_info_vars):
     n_batches = tf.shape(obs_var)[0]
     n_steps = tf.shape(obs_var)[1]
     obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
     obs_var = tf.cast(obs_var, tf.float32)
     if self.state_include_action:
         prev_action_var = state_info_vars["prev_action"]
         prev_action_var = tf.cast(prev_action_var, tf.float32)
         all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
     else:
         all_input_var = obs_var
     if self.feature_network is None:
         return dict(
             prob=L.get_output(
                 self.prob_network.output_layer,
                 {self.l_input: all_input_var}
             )
         )
     else:
         flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
         return dict(
             prob=L.get_output(
                 self.prob_network.output_layer,
                 {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
             )
         )
Example #3
    def __init__(self,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 action_merge_layer=-2,
                 output_nonlinearity=None,
                 bn=False,
                 dropout=.05):
        Serializable.quick_init(self, locals())

        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim),
                             name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim),
                                name="actions")

        n_layers = len(hidden_sizes) + 1

        if n_layers > 1:
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        l_hidden = l_obs

        for idx, size in enumerate(hidden_sizes):
            if bn:
                l_hidden = batch_norm(l_hidden)

            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_hidden = L.DenseLayer(l_hidden,
                                    num_units=size,
                                    nonlinearity=hidden_nonlinearity,
                                    name="h%d" % (idx + 1))
            l_hidden = L.DropoutLayer(l_hidden, dropout)

        if action_merge_layer == n_layers:
            l_hidden = L.ConcatLayer([l_hidden, l_action])

        l_output = L.DenseLayer(l_hidden,
                                num_units=1,
                                nonlinearity=output_nonlinearity,
                                name="output")

        output_var = L.get_output(l_output, deterministic=True)
        output_var_drop = L.get_output(l_output, deterministic=False)

        self._f_qval = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], output_var)
        self._f_qval_drop = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], output_var_drop)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity

        LayersPowered.__init__(self, [l_output])
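Compiling both a deterministic pass (_f_qval) and a dropout-enabled pass (_f_qval_drop) makes MC-dropout uncertainty estimates cheap at evaluation time. A small NumPy sketch of that post-processing (f_qval_drop stands in for the compiled stochastic function; not part of the listing):

import numpy as np

def mc_dropout_qvals(f_qval_drop, obs, actions, n_samples=20):
    # repeated stochastic passes through the dropout network
    samples = np.stack([f_qval_drop(obs, actions) for _ in range(n_samples)])
    # predictive mean and a simple uncertainty proxy
    return samples.mean(axis=0), samples.var(axis=0)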
Example #4
    def __init__(self,
                 name,
                 env_spec,
                 oracle_policy,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=tf.nn.tanh,
                 output_nonlinearity_binary=tf.nn.softmax,
                 output_dim_binary=2,
                 prob_network=None,
                 bn=False):
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if prob_network is None:

                prob_network = SharedMLP(
                    input_shape=(env_spec.observation_space.flat_dim, ),
                    output_dim=env_spec.action_space.flat_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    output_nonlinearity_binary=output_nonlinearity_binary,
                    output_dim_binary=output_dim_binary,
                    # batch_normalization=True,
                    name="prob_network",
                )

            self.oracle_policy = oracle_policy
            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer, deterministic=True))

            self._f_prob_binary = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer_binary,
                             deterministic=True))

        self.output_layer_binary = prob_network.output_layer_binary

        self.binary_output = L.get_output(prob_network.output_layer_binary,
                                          deterministic=True)
        self.prob_network = prob_network

        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers.
        # TODO: this doesn't currently work properly in the tf version so we leave out batch_norm
        super(SharedDeterministicMLPPolicy, self).__init__(env_spec)
        LayersPowered.__init__(
            self,
            [prob_network.output_layer, prob_network.output_layer_binary])
Example #5
 def get_phival_sym(self, obs_var, action_var, **kwargs):
     if self.vs_form is not None:
         phival, vs = L.get_output([self._output_layer, self._output_vs], {
             self._obs_layer: obs_var,
             self._action_layer: action_var
         }, **kwargs)
         phival = phival + vs
     else:
         phival = L.get_output(self._output_layer, {
             self._obs_layer: obs_var,
             self._action_layer: action_var
         }, **kwargs)
     phival = tf.reshape(phival, (-1, ))
     return phival
Example #6
 def compute_embeddings_given_state_action_pairs(self, obses, actions):
     result = dict()
     actions = tf.cast(actions, tf.float32)
     phi = L.get_output(self._l_phi, {self._l_state: obses})
     if self.reconciler is not None:
         reconciler_state_input = obses
     result['phi'] = phi
     result['psi'] = L.get_output(self._l_psi, {self._l_action: actions})
     if self.reconciler is not None:
         result['reconciler'] = L.get_output(
             self.reconciler.output_layer, {
                 self.reconciler.state_input_layer: reconciler_state_input,
                 self.reconciler.action_input_layer: actions,
             })
     return result
Example #7
 def get_qval_sym(self, obs_var, action_var, **kwargs):
     qvals = L.get_output(
         self._output_layer,
         {self._obs_layer: obs_var, self._action_layer: action_var},
         **kwargs
     )
     return tf.reshape(qvals, (-1,))
Example #8
 def get_qval_sym(self, obs_var, action_var, **kwargs):
     qvals = L.get_output(
         self._output_layer,
         {self._obs_layer: obs_var, self._action_layer: action_var},
         **kwargs
     )
     return tf.reshape(qvals, (-1,))
Example #9
 def get_action_sym(self, obs_var):
     output_vec = L.get_output(self._output_vec_layer,
                               obs_var,
                               deterministic=True)
     action = tf.to_int64(tf.argmax(output_vec, 1))
     action_vec = tf.one_hot(action, self._n)
     return action_vec
Example #10
 def dist_info_sym(self, obs_var, state_info_vars=None):
     output_vec = L.get_output(
         self._output_vec_layer,
         {self._l_obs_layer: tf.cast(obs_var, tf.float32)},
         deterministic=True) / self._c
     prob = tf.nn.softmax(output_vec)
     return dict(prob=prob)
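Dividing the output vector by the temperature self._c before the softmax trades off greediness against exploration. A small NumPy illustration of that Boltzmann weighting (not from the listing):

import numpy as np

def boltzmann_probs(qvals, temperature):
    # softmax of q / T: T -> 0 approaches argmax, large T approaches uniform
    z = qvals / temperature
    z = z - z.max()  # numerical stability
    e = np.exp(z)
    return e / e.sum()

qvals = np.array([1.0, 2.0, 3.0])
print(boltzmann_probs(qvals, 0.1))   # nearly one-hot on the best action
print(boltzmann_probs(qvals, 10.0))  # close to uniform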
Example #11
    def get_phival_sym(self, obs_var, action_var, **kwargs):
        if self.vs_form is not None:
            fs, l_log_A, vs = L.get_output(
                [self.fs, self._l_log_A, self._output_vs], obs_var)
            phival = -tf.reduce_sum(
                tf.exp(l_log_A) * tf.square(action_var - fs),
                axis=1,
                keep_dims=True)
            phival += vs
        else:
            fs, l_log_A = L.get_output([self.fs, self._l_log_A], obs_var)

            phival = -tf.reduce_sum(
                tf.exp(l_log_A) * tf.square(action_var - fs), axis=1)

        return tf.reshape(phival, (-1, ))
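The quadratic form above is a diagonal normalized-advantage-style term: it is non-positive and maximized exactly at action_var = fs, so fs acts as the greedy action and vs (when present) carries the state value. A tiny NumPy check (illustrative):

import numpy as np

log_A = np.array([0.3, -0.2])
fs = np.array([0.5, -1.0])

def phival(action):
    return -np.sum(np.exp(log_A) * np.square(action - fs))

print(phival(fs))                        # 0.0, the maximum
print(phival(fs + np.array([0.1, 0.0]))) # strictly negative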
Example #12
    def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(l_hid,
                                     num_units=hidden_size,
                                     nonlinearity=hidden_nonlinearity,
                                     name="hidden_%d" % idx,
                                     W=hidden_W_init,
                                     b=hidden_b_init,
                                     weight_normalization=weight_normalization)
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Example #13
 def get_e_qval_sym(self, obs_var, policy, **kwargs):
     if isinstance(policy, StochasticPolicy):
         agent_info = policy.dist_info_sym(obs_var)
         action_vec = agent_info['prob']
     else:
         raise NotImplementedError
     output_vec = L.get_output(self._output_vec_layer,
                               {self._obs_layer: obs_var}, **kwargs)
     return tf.reduce_sum(output_vec * action_vec, 1)
Example #14
 def dist_info_sym(self, obs_var, state_info_vars=None):
     obs_var = tf.cast(obs_var, tf.float32)
     # if empty dictionary then the latent variable has already been added
     if state_info_vars is not None and len(state_info_vars.keys()) != 0:
         obs_var = self.latent_sampler.merge_sym(obs_var, state_info_vars)
     mean_var, log_std_var = L.get_output([self._l_mean, self._l_std_param],
                                          obs_var)
     log_std_var = tf.maximum(log_std_var, self.min_std_param)
     return dict(mean=mean_var, log_std=log_std_var)
Example #15
    def __init__(
            self,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            action_merge_layer=-2,
            output_nonlinearity=None,
            bn=False):
        Serializable.quick_init(self, locals())

        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

        n_layers = len(hidden_sizes) + 1

        if n_layers > 1:
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1

        l_hidden = l_obs

        for idx, size in enumerate(hidden_sizes):
            if bn:
                l_hidden = batch_norm(l_hidden)

            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])

            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % (idx + 1)
            )

        if action_merge_layer == n_layers:
            l_hidden = L.ConcatLayer([l_hidden, l_action])

        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            nonlinearity=output_nonlinearity,
            name="output"
        )

        output_var = L.get_output(l_output, deterministic=True)

        self._f_qval = tensor_utils.compile_function([l_obs.input_var, l_action.input_var], output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity

        LayersPowered.__init__(self, [l_output])
Example #16
    def log_likelihood_sym(self, x_var, y_var):
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var

        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)

        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
Example #17
    def log_likelihood_sym(self, x_var, y_var):
        normalized_xs_var = (x_var - self._x_mean_var) / self._x_std_var

        normalized_means_var, normalized_log_stds_var = \
            L.get_output([self._l_mean, self._l_log_std], {self._mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * self._y_std_var + self._y_mean_var
        log_stds_var = normalized_log_stds_var + TT.log(self._y_std_var)

        return self._dist.log_likelihood_sym(y_var, dict(mean=means_var, log_std=log_stds_var))
Example #18
 def dist_info_sym(self, obs_var, state_info_vars=None):
     mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
     if self.min_std_param is not None:
         std_param_var = tf.maximum(std_param_var, self.min_std_param)
     if self.std_parametrization == 'exp':
         log_std_var = std_param_var
     elif self.std_parametrization == 'softplus':
         log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
     else:
         raise NotImplementedError
     return dict(mean=mean_var, log_std=log_std_var)
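Under the 'exp' parametrization the network output is already the log standard deviation, while under 'softplus' the standard deviation is log(1 + exp(param)), so its log is taken before returning. A short NumPy comparison of the two (not from the listing):

import numpy as np

param = np.array([-2.0, 0.0, 2.0])

log_std_exp = param                        # 'exp': std = exp(param)
std_softplus = np.log1p(np.exp(param))     # 'softplus': std = log(1 + exp(param))
log_std_softplus = np.log(std_softplus)

print(np.exp(log_std_exp))   # std under 'exp'
print(std_softplus)          # std under 'softplus'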
Example #19
    def __init__(
        self,
        name,
        env_spec,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.tanh,
        prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = self.create_MLP(
                    input_shape=(obs_dim, ),
                    output_dim=env_spec.action_space.n,
                    hidden_sizes=hidden_sizes,
                    name="prob_network",
                )
            self._l_obs, self._l_prob = self.forward_MLP(
                'prob_network',
                prob_network,
                n_hidden=len(hidden_sizes),
                input_shape=(obs_dim, ),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax,
                reuse=None)

            # if you want to input your own tensor.
            self._forward_out = lambda x, is_train: self.forward_MLP(
                'prob_network',
                prob_network,
                n_hidden=len(hidden_sizes),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax,  # softmax assumed; `output_nonlinearity` is undefined in this scope
                input_tensor=x,
                is_training=is_train)[1]

            self._f_prob = tensor_utils.compile_function([self._l_obs],
                                                         L.get_output(
                                                             self._l_prob))

            self._dist = Categorical(env_spec.action_space.n)
Example #20
 def dist_info_sym(self, obs_var, state_info_vars=None):
     mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
     if self.min_std_param is not None:
         std_param_var = tf.maximum(std_param_var, self.min_std_param)
     if self.std_parametrization == 'exp':
         log_std_var = std_param_var
     elif self.std_parametrization == 'softplus':
         log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
     else:
         raise NotImplementedError
     return dict(mean=mean_var, log_std=log_std_var)
Example #21
 def dist_info_sym(self, obs_var, state_info_vars):
     n_batches = tf.shape(obs_var)[0]
     n_steps = tf.shape(obs_var)[1]
     obs_var = tf.reshape(obs_var, tf.stack([n_batches, n_steps, -1]))
     if self.state_include_action:
         prev_action_var = state_info_vars["prev_action"]
         all_input_var = tf.concat(axis=2, values=[obs_var, prev_action_var])
     else:
         all_input_var = obs_var
     if self.feature_network is None:
         means, log_stds = L.get_output(
             [self.mean_network.output_layer, self.l_log_std],
             {self.l_input: all_input_var}
         )
     else:
         flat_input_var = tf.reshape(all_input_var, (-1, self.input_dim))
         means, log_stds = L.get_output(
             [self.mean_network.output_layer, self.l_log_std],
             {self.l_input: all_input_var, self.feature_network.input_layer: flat_input_var}
         )
     return dict(mean=means, log_std=log_stds)
Example #22
            def fetch_policy_out(flat_input_var):
                if feature_network is None:
                    feature_var = flat_input_var
                else:
                    feature_var = L.get_output(
                        l_flat_feature,
                        {feature_network.input_layer: flat_input_var})

                inputs = [
                    flat_input_var,
                    mean_network.step_prev_state_layer.input_var,
                ]

                outputs = L.get_output(
                    [
                        mean_network.step_output_layer, l_step_log_std,
                        mean_network.step_hidden_layer,
                        mean_network.step_cell_layer
                    ], {mean_network.step_input_layer: feature_var})

                return inputs, outputs
Example #23
    def init_policy(self):
        output_vec = L.get_output(self._output_vec_layer,
                                  deterministic=True) / self._c
        prob = tf.nn.softmax(output_vec)
        max_qval = tf.reduce_logsumexp(output_vec, [1])

        self._f_prob = tensor_utils.compile_function(
            [self._obs_layer.input_var], prob)
        self._f_max_qvals = tensor_utils.compile_function(
            [self._obs_layer.input_var], max_qval)

        self._dist = Categorical(self._n)
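reduce_logsumexp acts here as a smooth stand-in for the maximum Q-value, pairing naturally with the temperature-scaled softmax policy above. A quick NumPy check of the bound max(q) <= logsumexp(q) <= max(q) + log(n) (illustrative):

import numpy as np

qvals = np.array([1.0, 2.0, 3.0])
soft_max_q = np.log(np.sum(np.exp(qvals)))   # logsumexp
print(qvals.max(), soft_max_q)               # 3.0  3.4076...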
Example #24
    def init_policy(self):
        output_vec = L.get_output(self._output_vec_layer, deterministic=True)
        action = tf.to_int64(tf.argmax(output_vec, 1))
        action_vec = tf.one_hot(action, self._n)
        max_qval = tf.reduce_max(output_vec, 1)

        self._f_actions = tensor_utils.compile_function(
            [self._obs_layer.input_var], action)
        self._f_actions_vec = tensor_utils.compile_function(
            [self._obs_layer.input_var], action_vec)
        self._f_max_qvals = tensor_utils.compile_function(
            [self._obs_layer.input_var], max_qval)
Example #25
 def create_MLP(
     self,
     name,
     output_dim,
     hidden_sizes,
     hidden_nonlinearity,
     output_nonlinearity,
     hidden_W_init=L.XavierUniformInitializer(),
      hidden_b_init=tf.zeros_initializer(),
     output_W_init=L.XavierUniformInitializer(),
      output_b_init=tf.zeros_initializer(),
     input_var=None,
     input_layer=None,
     input_shape=None,
     batch_normalization=False,
     weight_normalization=False,
 ):
     with tf.variable_scope(name):
         if input_layer is None:
             l_in = L.InputLayer(shape=(None, ) + input_shape,
                                 input_var=input_var,
                                 name="input")
         else:
             l_in = input_layer
         all_layers = [l_in]
         l_hid = l_in
         if batch_normalization:
             l_hid = L.batch_norm(l_hid)
         for idx, hidden_size in enumerate(hidden_sizes):
             l_hid = L.DenseLayer(l_hid,
                                  num_units=hidden_size,
                                  nonlinearity=hidden_nonlinearity,
                                  name="hidden_%d" % idx,
                                  W=hidden_W_init,
                                  b=hidden_b_init,
                                  weight_normalization=weight_normalization)
             if batch_normalization:
                 l_hid = L.batch_norm(l_hid)
             all_layers.append(l_hid)
         l_out = L.DenseLayer(l_hid,
                              num_units=output_dim,
                              nonlinearity=output_nonlinearity,
                              name="output",
                              W=output_W_init,
                              b=output_b_init,
                              weight_normalization=weight_normalization)
         if batch_normalization:
             l_out = L.batch_norm(l_out)
         all_layers.append(l_out)
         output = L.get_output(l_out)
         # returns layers(), input_layer, output_layer, input_var, output
         return all_layers, l_in, l_out, l_in.input_var, output
Example #26
    def get_qval_plus_var_sym(self, obs_var, action_var, **kwargs):
        """
        Monte-Carlo-dropout estimate of the Q-value: run several stochastic
        forward passes and combine them as mean plus variance, following the
        method name.
        """
        mc_dropout = 5
        all_qvals = []
        for _ in range(mc_dropout):
            qvals = L.get_output(
                self._output_layer,
                {self._obs_layer: obs_var, self._action_layer: action_var},
                **kwargs)
            all_qvals.append(tf.reshape(qvals, (-1,)))
        # (mc_dropout, batch) -> per-sample mean and variance across the passes
        mean_qvals, var_qvals = tf.nn.moments(tf.stack(all_qvals, axis=0), axes=[0])
        return mean_qvals + var_qvals
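As elsewhere in these examples, the symbolic result can be compiled into a callable with tensor_utils.compile_function. A sketch (obs_dim, act_dim, and the qf instance are placeholders, not names from the listing):

obs_var = tf.placeholder(tf.float32, shape=(None, obs_dim), name="obs")
action_var = tf.placeholder(tf.float32, shape=(None, act_dim), name="action")
f_qval_plus_var = tensor_utils.compile_function(
    [obs_var, action_var], qf.get_qval_plus_var_sym(obs_var, action_var))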
Example #27
    def __init__(
        self,
        name,
        env_spec,
        conv_filters,
        conv_filter_sizes,
        conv_strides,
        conv_pads,
        hidden_sizes=[],
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=tf.nn.softmax,
        prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        self._env_spec = env_spec
        # import pdb; pdb.set_trace()
        if prob_network is None:
            prob_network = ConvNetwork(
                input_shape=env_spec.observation_space.shape,
                output_dim=env_spec.action_space.n,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                name="prob_network",
            )

        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        self._f_prob = tensor_utils.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer))

        self._dist = Categorical(env_spec.action_space.n)

        super(CategoricalConvPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
Example #28
    def __init__(
        self,
        name,
        env_spec,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=tf.nn.tanh,
        mean_network=None,
    ):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        with tf.variable_scope(name):

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            # create network
            if mean_network is None:
                mean_network = MLP(
                    name="mean_network",
                    input_shape=(obs_dim, ),
                    output_dim=action_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                )
            self._mean_network = mean_network

            l_mean = mean_network.output_layer
            obs_var = mean_network.input_layer.input_var

            self._l_mean = l_mean
            action_var = L.get_output(self._l_mean, deterministic=True)

            LayersPowered.__init__(self, [l_mean])
            super(DeterministicMLPPolicy, self).__init__(env_spec)

            self._f_actions = tensor_utils.compile_function(
                inputs=[obs_var],
                outputs=action_var,
            )
Example #29
    def __init__(self,
                 name,
                 env_spec,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 gating_network=None,
                 input_layer=None,
                 num_options=4,
                 conv_filters=None,
                 conv_filter_sizes=None,
                 conv_strides=None,
                 conv_pads=None,
                 input_shape=None):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        self.num_options = num_options

        assert isinstance(env_spec.action_space, Discrete)

        with tf.variable_scope(name):
            input_layer, output_layer = self.make_network(
                (env_spec.observation_space.flat_dim, ),
                env_spec.action_space.n,
                hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                gating_network=gating_network,
                l_in=input_layer,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                input_shape=input_shape)
            self._l_prob = output_layer
            self._l_obs = input_layer

            self._f_prob = tensor_utils.compile_function(
                [input_layer.input_var], L.get_output(output_layer))

            self._dist = Categorical(env_spec.action_space.n)

            super(CategoricalDecomposedPolicy, self).__init__(env_spec)
            LayersPowered.__init__(self, [output_layer])
Example #30
    def __init__(
            self,
            name,
            env_spec,
            conv_filters, conv_filter_sizes, conv_strides, conv_pads,
            hidden_sizes=[],
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.softmax,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        self._env_spec = env_spec
        # import pdb; pdb.set_trace()
        if prob_network is None:
            prob_network = ConvNetwork(
                input_shape=env_spec.observation_space.shape,
                output_dim=env_spec.action_space.n,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                name="prob_network",
            )

        self._l_prob = prob_network.output_layer
        self._l_obs = prob_network.input_layer
        self._f_prob = tensor_utils.compile_function(
            [prob_network.input_layer.input_var],
            L.get_output(prob_network.output_layer)
        )

        self._dist = Categorical(env_spec.action_space.n)

        super(CategoricalConvPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
Example #31
    def get_phi_derive_sym(self, obs_var, action_var, **kwargs):
        fs, l_log_A = L.get_output([self.fs, self._l_log_A], obs_var, **kwargs)

        phival = tf.reduce_sum(-tf.exp(l_log_A) * tf.square(action_var - fs),
                               axis=1)
        phival = tf.reshape(phival, (-1, ))

        # Derivative
        phi_prime = -2. * tf.exp(l_log_A) * (action_var - fs)

        phi_double_prime = -2 * tf.exp(l_log_A)

        return dict(phival=phival,
                    phi_prime=phi_prime,
                    phi_double_prime=phi_double_prime)
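phi_prime and phi_double_prime are the elementwise first and second derivatives of phival with respect to the action. A finite-difference sanity check in NumPy (not from the listing):

import numpy as np

log_A, fs = np.array([0.3, -0.2]), np.array([0.5, -1.0])
a = np.array([0.1, 0.4])
eps = 1e-5

phi = lambda x: -np.sum(np.exp(log_A) * np.square(x - fs))
phi_prime = -2.0 * np.exp(log_A) * (a - fs)

numeric = np.array([(phi(a + eps * e) - phi(a - eps * e)) / (2 * eps)
                    for e in np.eye(2)])
print(np.allclose(numeric, phi_prime))  # True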
Example #32
 def dist_info_sym(self, obs_var, state_info_vars=None):
     # This function constructs the tf graph, only called during beginning of training
     # obs_var - observation tensor
     # mean_var - tensor for policy mean
     # std_param_var - tensor for policy std before output
     mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
     if self.min_std_param is not None:
         std_param_var = tf.maximum(std_param_var, self.min_std_param)
     if self.std_parametrization == 'exp':
         log_std_var = std_param_var
     elif self.std_parametrization == 'softplus':
         log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
     else:
         raise NotImplementedError
     return dict(mean=mean_var, log_std=log_std_var)
Example #33
 def dist_info_sym(self, obs_var, state_info_vars=None):
     # This function constructs the tf graph, only called during beginning of training
     # obs_var - observation tensor
     # mean_var - tensor for policy mean
     # std_param_var - tensor for policy std before output
     mean_var, std_param_var = L.get_output([self._l_mean, self._l_std_param], obs_var)
     if self.min_std_param is not None:
         std_param_var = tf.maximum(std_param_var, self.min_std_param)
     if self.std_parametrization == 'exp':
         log_std_var = std_param_var
     elif self.std_parametrization == 'softplus':
         log_std_var = tf.log(tf.log(1. + tf.exp(std_param_var)))
     else:
         raise NotImplementedError
     return dict(mean=mean_var, log_std=log_std_var)
Example #34
    def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(),
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(),
                 input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False,
                 ):

        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None,) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
                self._layers.append(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                l_out = L.batch_norm(l_out)
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Example #35
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = self.create_MLP(
                    input_shape=(obs_dim,),
                    output_dim=env_spec.action_space.n,
                    hidden_sizes=hidden_sizes,
                    name="prob_network",
                )
            self._l_obs, self._l_prob = self.forward_MLP('prob_network', prob_network,
                n_hidden=len(hidden_sizes), input_shape=(obs_dim,),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax, reuse=None)

            # if you want to input your own tensor.
            self._forward_out = lambda x, is_train: self.forward_MLP('prob_network', prob_network,
                n_hidden=len(hidden_sizes), hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax,  # softmax assumed; `output_nonlinearity` is undefined in this scope
                input_tensor=x, is_training=is_train)[1]


            self._f_prob = tensor_utils.compile_function(
                [self._l_obs],
                L.get_output(self._l_prob)
            )

            self._dist = Categorical(env_spec.action_space.n)
Example #36
    def _make_subnetwork(self,
                         input_layer,
                         dim_output,
                         hidden_sizes,
                         output_nonlinearity=tf.sigmoid,
                         name="pred_network"):

        prob_network = MLP(
            # input_shape=(env_spec.observation_space.flat_dim,),
            output_dim=dim_output,
            hidden_sizes=hidden_sizes,
            hidden_nonlinearity=lrelu,
            output_nonlinearity=output_nonlinearity,
            name=name,
            input_layer=input_layer)

        return L.get_output(
            prob_network.output_layer), prob_network.output_layer
Example #37
    def __init__(
        self,
        name,
        model,
        abstract_dim,
        reward_fn=None,
        hidden_dim=32,
        hidden_nonlinearity=tf.tanh,
        output_nonlinearity=None,
        lstm_layer_cls=L.LSTMLayer,
    ):
        # possible to pass in reward_fn?
        with tf.variable_scope(name):
            self.model = model
            self.obs_dim = abstract_dim
            # no external feature layer is wired in; the network builds its own input layer
            self.net = LSTMNetwork(
                input_shape=(self.obs_dim,),
                output_dim=self.obs_dim,
                hidden_dim=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                lstm_layer_cls=lstm_layer_cls,
                name="planner",
            )
            self.obs_var = self.net.input_layer.input_var
            self.output = L.get_output(self.net.output_layer, self.obs_var)

            env = HalfCheetahTargEnv()
            target_init = tf.constant(env.TARGET)
            target = tf.get_variable('target',
                                     initializer=target_init,
                                     trainable=False)
            self.loss = -self.model.get_loglikelihood(
                self.obs_var, self.output) * tf.norm(target - self.output)

            # an Adam optimizer is assumed here; the snippet does not specify one
            optim = tf.train.AdamOptimizer()
            self.optimizer = optim
            self.train_op = optim.minimize(self.loss)

            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            self.sess = tf.Session(config=config)
            self.sess.run(tf.global_variables_initializer())
Example #38
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(
                    input_shape=(env_spec.observation_space.flat_dim,),
                    output_dim=env_spec.action_space.n,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    name="prob_network",
                )

            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer)
            )

            self._dist = Categorical(env_spec.action_space.n)

            super(CategoricalMLPPolicy, self).__init__(env_spec)
            LayersPowered.__init__(self, [prob_network.output_layer])
Example #39
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            prob_network=None,
    ):
        """
        :param env_spec: A spec for the mdp.
        :param hidden_sizes: list of sizes for the fully connected hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param prob_network: manually specified network for this policy, other network params
        are ignored
        :return:
        """
        Serializable.quick_init(self, locals())

        assert isinstance(env_spec.action_space, Discrete)

        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(
                    input_shape=(env_spec.observation_space.flat_dim,),
                    output_dim=env_spec.action_space.n,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    name="prob_network",
                )

            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer)
            )

            self._dist = Categorical(env_spec.action_space.n)

            super(CategoricalMLPPolicy, self).__init__(env_spec)
            LayersPowered.__init__(self, [prob_network.output_layer])
Example #40
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.relu,
            output_nonlinearity=tf.nn.tanh,
            prob_network=None,
            bn=False):
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if prob_network is None:
                prob_network = MLP(
                    input_shape=(env_spec.observation_space.flat_dim,),
                    output_dim=env_spec.action_space.flat_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    # batch_normalization=True,
                    name="prob_network",
                )

            self._l_prob = prob_network.output_layer
            self._l_obs = prob_network.input_layer
            self._f_prob = tensor_utils.compile_function(
                [prob_network.input_layer.input_var],
                L.get_output(prob_network.output_layer, deterministic=True)
            )

        self.prob_network = prob_network

        # Note the deterministic=True argument. It makes sure that when getting
        # actions from single observations, we do not update params in the
        # batch normalization layers.
        # TODO: this doesn't currently work properly in the tf version so we leave out batch_norm
        super(DeterministicMLPPolicy, self).__init__(env_spec)
        LayersPowered.__init__(self, [prob_network.output_layer])
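The deterministic=True flag plays the same role as a training switch for batch normalization: batch statistics are only used during batched training passes, never when querying single actions. The same idea in plain TF 1.x, independent of the layer library used in these examples:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=(None, 8))
is_training = tf.placeholder(tf.bool, shape=())
# uses batch statistics when training=True and stored moving averages otherwise
h = tf.layers.batch_normalization(x, training=is_training)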
Example #41
 def log_likelihood_sym(self, x_var, y_var):
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
     return self._dist.log_likelihood_sym(y_var, dict(prob=prob))
Example #42
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            learn_std=True,
            init_std=1.0,
            output_nonlinearity=None,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            Serializable.quick_init(self, locals())
            super(GaussianGRUPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )

            mean_network = GRUNetwork(
                input_shape=(feature_dim,),
                input_layer=l_feature,
                output_dim=action_dim,
                hidden_dim=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                name="mean_network"
            )

            l_log_std = L.ParamLayer(
                mean_network.input_layer,
                num_units=action_dim,
                param=tf.constant_initializer(np.log(init_std)),
                name="output_log_std",
                trainable=learn_std,
            )

            l_step_log_std = L.ParamLayer(
                mean_network.step_input_layer,
                num_units=action_dim,
                param=l_log_std.param,
                name="step_output_log_std",
                trainable=learn_std,
            )

            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action

            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})

            self.f_step_mean_std = tensor_utils.compile_function(
                [
                    flat_input_var,
                    mean_network.step_prev_hidden_layer.input_var,
                ],
                L.get_output([
                    mean_network.step_output_layer,
                    l_step_log_std,
                    mean_network.step_hidden_layer,
                ], {mean_network.step_input_layer: feature_var})
            )

            self.l_log_std = l_log_std

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim

            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentDiagonalGaussian(action_dim)

            out_layers = [mean_network.output_layer, l_log_std, l_step_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
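f_step_mean_std maps a flat observation (plus the previous action when state_include_action is set) and the previous hidden state to the step's mean, log_std, and next hidden state. A rollout would use it roughly as follows (a sketch; env, horizon, and the zero initial hidden state are assumptions, not part of the listing):

import numpy as np

obs = env.reset()
prev_action = np.zeros(policy.action_dim)
hidden = np.zeros((1, policy.hidden_dim))  # the learned hid_init would normally be used

for _ in range(horizon):
    flat_input = np.concatenate([obs, prev_action])[None, :]
    mean, log_std, hidden = policy.f_step_mean_std(flat_input, hidden)
    action = mean[0] + np.exp(log_std[0]) * np.random.randn(policy.action_dim)
    obs, reward, done, _ = env.step(action)
    prev_action = action
    if done:
        break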
Example #43
 def get_action_sym(self, obs_var):
     return L.get_output(self.prob_network.output_layer, obs_var)
Example #44
    def __init__(
            self,
            name,
            env_spec,
            hidden_dim=32,
            feature_network=None,
            prob_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh,
            forget_bias=1.0,
            use_peepholes=False):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            Serializable.quick_init(self, locals())
            super(CategoricalLSTMPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )

            if prob_network is None:
                prob_network = LSTMNetwork(
                    input_shape=(feature_dim,),
                    input_layer=l_feature,
                    output_dim=env_spec.action_space.n,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    forget_bias=forget_bias,
                    use_peepholes=use_peepholes,
                    name="prob_network"
                )

            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action

            flat_input_var = tf.placeholder(dtype=tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})

            self.f_step_prob = tensor_utils.compile_function(
                [
                    flat_input_var,
                    prob_network.step_prev_hidden_layer.input_var,
                    prob_network.step_prev_cell_layer.input_var
                ],
                L.get_output([
                    prob_network.step_output_layer,
                    prob_network.step_hidden_layer,
                    prob_network.step_cell_layer
                ], {prob_network.step_input_layer: feature_var})
            )

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim

            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)

            out_layers = [prob_network.output_layer]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
Example #45
 def auxiliary_pred_sym(self, obs_var, state_info_vars=None):
     aux_pred = L.get_output(self._l_aux_pred, obs_var)
     return aux_pred
Example #46
    def __init__(
            self,
            name,
            env_spec,
            hidden_dims=(32,),
            feature_network=None,
            state_include_action=True,
            hidden_nonlinearity=tf.tanh):
        """
        :param env_spec: A spec for the env.
        :param hidden_dims: dimension of hidden layers
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        with tf.variable_scope(name):
            assert isinstance(env_spec.action_space, Discrete)
            Serializable.quick_init(self, locals())
            super(RecurrentCategoricalPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(
                shape=(None, None, input_dim),
                name="input"
            )

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([tf.shape(input)[0], tf.shape(input)[1], feature_dim])
                    ),
                    shape_op=lambda _, input_shape: (input_shape[0], input_shape[1], feature_dim)
                )

            prob_network = DeepGRUNetwork(
                input_shape=(feature_dim,),
                input_layer=l_feature,
                output_dim=env_spec.action_space.n,
                hidden_dims=hidden_dims,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=tf.nn.softmax,
                name="prob_network"
            )

            self.prob_network = prob_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action

            flat_input_var = tf.placeholder(tf.float32, shape=(None, input_dim), name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(l_flat_feature, {feature_network.input_layer: flat_input_var})

            # Build the step feedforward function.
            inputs = [flat_input_var] \
                    + [prev_hidden.input_var for prev_hidden
                            in prob_network.step_prev_hidden_layers]
            outputs = [prob_network.step_output_layer] \
                    + prob_network.step_hidden_layers
            outputs = L.get_output(outputs, {prob_network.step_input_layer: feature_var})
            self.f_step_prob = tensor_utils.compile_function(
                    inputs, outputs)

            # Function to fetch hidden init values
            self.f_hid_inits = tensor_utils.compile_function(
                    [], prob_network.hid_inits)

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dims = hidden_dims

            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentCategorical(env_spec.action_space.n)

            out_layers = [prob_network.output_layer]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
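Because the constructor also compiles f_hid_inits, a rollout loop would typically fetch the initial hidden states once per episode and then advance them with f_step_prob. The sketch below illustrates that cycle, assuming observations are already flattened and actions are re-encoded as one-hot vectors before the next step; it is an illustration of the pattern, not the library's get_action/reset implementation.

import numpy as np

# Illustrative reset/step cycle for the GRU policy above (assumed shapes and API).
def reset(policy, n_envs=1):
    hid_inits = policy.f_hid_inits()                        # one initial state per GRU layer
    policy.prev_hiddens = [np.tile(h, (n_envs, 1)) for h in hid_inits]
    policy.prev_actions = np.zeros((n_envs, policy.action_dim))

def step(policy, obs):
    # With state_include_action=True the previous one-hot actions are appended.
    flat_input = np.concatenate([obs, policy.prev_actions], axis=-1)
    outputs = policy.f_step_prob(flat_input, *policy.prev_hiddens)
    probs, policy.prev_hiddens = outputs[0], list(outputs[1:])
    actions = np.array([np.random.choice(p.shape[-1], p=p) for p in probs])
    policy.prev_actions = np.eye(policy.action_dim)[actions]  # re-encode as one-hot
    return actions, probs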
Example #47
 def dist_info_sym(self, x_var):
     normalized_xs_var = (x_var - self.x_mean_var) / self.x_std_var
     prob = L.get_output(self.l_prob, {self.prob_network.input_layer: normalized_xs_var})
     return dict(prob=prob)
 def dist_info_sym(self, obs_var, state_info_vars=None):
     return dict(prob=L.get_output(self._l_prob, {self._l_obs: tf.cast(obs_var, tf.float32)}))
 def predict_sym(self, xs):
     return L.get_output(self.l_out, xs)
    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=None,
            optimizer=None,
            normalize_inputs=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units in each layer of the network.
        :param hidden_nonlinearity: Non-linearity used for each hidden layer of the network.
        :param optimizer: Optimizer for minimizing the mean squared prediction error.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):

            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")

            self.output_dim = output_dim
            self.optimizer = optimizer

            if network is None:
                network = MLP(
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    name="network"
                )

            l_out = network.output_layer

            LayersPowered.__init__(self, [l_out])

            xs_var = network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="ys")

            x_mean_var = tf.get_variable(
                name="x_mean",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(0., dtype=tf.float32)
            )
            x_std_var = tf.get_variable(
                name="x_std",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(1., dtype=tf.float32)
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            fit_ys_var = L.get_output(l_out, {network.input_layer: normalized_xs_var})

            loss = tf.reduce_mean(tf.square(fit_ys_var - ys_var))  # mean squared error (minimized)

            self.f_predict = tensor_utils.compile_function([xs_var], fit_ys_var)

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[fit_ys_var],
            )

            optimizer_args["inputs"] = [xs_var, ys_var]

            self.optimizer.update_opt(**optimizer_args)

            self.name = name
            self.l_out = l_out

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
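The regressor exposes f_predict plus an optimizer configured with inputs=[xs_var, ys_var], so fitting amounts to refreshing the normalization statistics and handing the data to the optimizer. The methods below are a minimal sketch of that flow, assuming the rllab-style optimizer exposes optimize(inputs); they are not the library's verbatim fit()/predict().

import numpy as np
import tensorflow as tf

# Hypothetical fit()/predict() for the regressor above (illustrative sketch).
def fit(self, xs, ys):
    if self.normalize_inputs:
        # Refresh the input normalization statistics on the training set.
        sess = tf.get_default_session()
        sess.run([
            tf.assign(self.x_mean_var, np.mean(xs, axis=0, keepdims=True)),
            tf.assign(self.x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),
        ])
    # update_opt above registered inputs=[xs_var, ys_var], so pass matching arrays.
    self.optimizer.optimize([xs, ys])

def predict(self, xs):
    return self.f_predict(np.asarray(xs))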
Example #51
    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            prob_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            optimizer=None,
            tr_optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            normalize_inputs=True,
            no_initial_trust_region=True,
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units in each layer of the probability network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the probability network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):
            if optimizer is None:
                optimizer = LbfgsOptimizer(name="optimizer")
            if tr_optimizer is None:
                tr_optimizer = ConjugateGradientOptimizer()

            self.output_dim = output_dim
            self.optimizer = optimizer
            self.tr_optimizer = tr_optimizer

            if prob_network is None:
                prob_network = MLP(
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=tf.nn.softmax,
                    name="prob_network"
                )

            l_prob = prob_network.output_layer

            LayersPowered.__init__(self, [l_prob])

            xs_var = prob_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="ys")
            old_prob_var = tf.placeholder(dtype=tf.float32, shape=[None, output_dim], name="old_prob")

            x_mean_var = tf.get_variable(
                name="x_mean",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(0., dtype=tf.float32)
            )
            x_std_var = tf.get_variable(
                name="x_std",
                shape=(1,) + input_shape,
                initializer=tf.constant_initializer(1., dtype=tf.float32)
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var

            prob_var = L.get_output(l_prob, {prob_network.input_layer: normalized_xs_var})

            old_info_vars = dict(prob=old_prob_var)
            info_vars = dict(prob=prob_var)

            dist = self._dist = Categorical(output_dim)

            mean_kl = tf.reduce_mean(dist.kl_sym(old_info_vars, info_vars))

            loss = - tf.reduce_mean(dist.log_likelihood_sym(ys_var, info_vars))

            predicted = tensor_utils.to_onehot_sym(tf.argmax(prob_var, axis=1), output_dim)

            self.prob_network = prob_network
            self.f_predict = tensor_utils.compile_function([xs_var], predicted)
            self.f_prob = tensor_utils.compile_function([xs_var], prob_var)
            self.l_prob = l_prob

            self.optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var], inputs=[xs_var, ys_var])
            self.tr_optimizer.update_opt(loss=loss, target=self, network_outputs=[prob_var],
                                         inputs=[xs_var, ys_var, old_prob_var],
                                         leq_constraint=(mean_kl, step_size)
                                         )

            self.use_trust_region = use_trust_region
            self.name = name

            self.normalize_inputs = normalize_inputs
            self.x_mean_var = x_mean_var
            self.x_std_var = x_std_var
            self.first_optimized = not no_initial_trust_region
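Two optimizers are registered above: an unconstrained one, and a trust-region one whose extra input is the old class probabilities. A fit() that switches between them based on use_trust_region and first_optimized might look like the sketch below; it is an assumption consistent with the attributes stored above, not the library's verbatim code.

import numpy as np
import tensorflow as tf

# Hypothetical fit() showing how the two optimizers set up above might be used.
def fit(self, xs, ys):
    if self.normalize_inputs:
        sess = tf.get_default_session()
        sess.run([
            tf.assign(self.x_mean_var, np.mean(xs, axis=0, keepdims=True)),
            tf.assign(self.x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),
        ])
    if self.use_trust_region and self.first_optimized:
        # Constrain the KL divergence between the previous and new class probabilities.
        old_prob = self.f_prob(xs)
        self.tr_optimizer.optimize([xs, ys, old_prob])
    else:
        self.optimizer.optimize([xs, ys])
        self.first_optimized = True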
    def __init__(
            self,
            name,
            input_shape,
            output_dim,
            mean_network=None,
            hidden_sizes=(32, 32),
            hidden_nonlinearity=tf.nn.tanh,
            optimizer=None,
            use_trust_region=True,
            step_size=0.01,
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            std_nonlinearity=None,
            normalize_inputs=True,
            normalize_outputs=True,
            subsample_factor=1.0
    ):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param step_size: KL divergence constraint for each iteration
        :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is False. If
        adaptive_std is True, this parameter is ignored, and the weights for the std network are always learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same architecture as the mean.
        :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if `std_share_network`
        is False. It defaults to the same non-linearity as the mean.
        """
        Serializable.quick_init(self, locals())

        with tf.variable_scope(name):

            if optimizer is None:
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer("optimizer")
                else:
                    optimizer = LbfgsOptimizer("optimizer")

            self._optimizer = optimizer
            self._subsample_factor = subsample_factor

            if mean_network is None:
                mean_network = MLP(
                    name="mean_network",
                    input_shape=input_shape,
                    output_dim=output_dim,
                    hidden_sizes=hidden_sizes,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=None,
                )

            l_mean = mean_network.output_layer

            if adaptive_std:
                l_log_std = MLP(
                    name="log_std_network",
                    input_shape=input_shape,
                    input_var=mean_network.input_layer.input_var,
                    output_dim=output_dim,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_nonlinearity,
                    output_nonlinearity=None,
                ).output_layer
            else:
                l_log_std = L.ParamLayer(
                    mean_network.input_layer,
                    num_units=output_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )

            LayersPowered.__init__(self, [l_mean, l_log_std])

            xs_var = mean_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32, name="ys", shape=(None, output_dim))
            old_means_var = tf.placeholder(dtype=tf.float32, name="old_means", shape=(None, output_dim))
            old_log_stds_var = tf.placeholder(dtype=tf.float32, name="old_log_stds", shape=(None, output_dim))

            x_mean_var = tf.Variable(
                np.zeros((1,) + input_shape, dtype=np.float32),
                name="x_mean",
            )
            x_std_var = tf.Variable(
                np.ones((1,) + input_shape, dtype=np.float32),
                name="x_std",
            )
            y_mean_var = tf.Variable(
                np.zeros((1, output_dim), dtype=np.float32),
                name="y_mean",
            )
            y_std_var = tf.Variable(
                np.ones((1, output_dim), dtype=np.float32),
                name="y_std",
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var
            normalized_ys_var = (ys_var - y_mean_var) / y_std_var

            normalized_means_var = L.get_output(l_mean, {mean_network.input_layer: normalized_xs_var})
            normalized_log_stds_var = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})

            means_var = normalized_means_var * y_std_var + y_mean_var
            log_stds_var = normalized_log_stds_var + tf.log(y_std_var)

            normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
            normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)

            dist = self._dist = DiagonalGaussian(output_dim)

            normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)

            mean_kl = tf.reduce_mean(dist.kl_sym(
                dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var),
                normalized_dist_info_vars,
            ))

            loss = - tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars))

            self._f_predict = tensor_utils.compile_function([xs_var], means_var)
            self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[normalized_means_var, normalized_log_stds_var],
            )

            if use_trust_region:
                optimizer_args["leq_constraint"] = (mean_kl, step_size)
                optimizer_args["inputs"] = [xs_var, ys_var, old_means_var, old_log_stds_var]
            else:
                optimizer_args["inputs"] = [xs_var, ys_var]

            self._optimizer.update_opt(**optimizer_args)

            self._use_trust_region = use_trust_region
            self._name = name

            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var
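Here both inputs and outputs are normalized, and the trust-region path needs the previous means and log-stds as extra inputs, matching the update_opt call above. A fit() for this regressor could therefore look roughly like the sketch below; the subsampling, normalization updates, and optimizer calls are assumptions consistent with the attributes stored above, not the library's verbatim implementation.

import numpy as np
import tensorflow as tf

# Hypothetical fit() for the Gaussian MLP regressor above (illustrative sketch).
def fit(self, xs, ys):
    if self._subsample_factor < 1.0:
        # Optionally train on a random subset of the data.
        idx = np.random.choice(len(xs), int(len(xs) * self._subsample_factor), replace=False)
        xs, ys = xs[idx], ys[idx]
    sess = tf.get_default_session()
    if self._normalize_inputs:
        sess.run([
            tf.assign(self._x_mean_var, np.mean(xs, axis=0, keepdims=True)),
            tf.assign(self._x_std_var, np.std(xs, axis=0, keepdims=True) + 1e-8),
        ])
    if self._normalize_outputs:
        sess.run([
            tf.assign(self._y_mean_var, np.mean(ys, axis=0, keepdims=True)),
            tf.assign(self._y_std_var, np.std(ys, axis=0, keepdims=True) + 1e-8),
        ])
    if self._use_trust_region:
        # Trust-region inputs match update_opt above: xs, ys, old means, old log stds.
        old_means, old_log_stds = self._f_pdists(xs)
        self._optimizer.optimize([xs, ys, old_means, old_log_stds])
    else:
        self._optimizer.optimize([xs, ys])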