Code Example #1
    def __init__(self,
                 input_shape,
                 output_dim,
                 name="GaussianMLPRegressor",
                 mean_network=None,
                 hidden_sizes=(32, 32),
                 hidden_nonlinearity=tf.nn.tanh,
                 optimizer=None,
                 optimizer_args=None,
                 use_trust_region=True,
                 max_kl_step=0.01,
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_hidden_sizes=(32, 32),
                 std_nonlinearity=None,
                 normalize_inputs=True,
                 normalize_outputs=True,
                 subsample_factor=1.0):
        """
        :param input_shape: Shape of the input data.
        :param output_dim: Dimension of output.
        :param hidden_sizes: Number of hidden units of each layer of the mean
         network.
        :param hidden_nonlinearity: Non-linearity used for each layer of the
         mean network.
        :param optimizer: Optimizer for minimizing the negative log-likelihood.
        :param use_trust_region: Whether to use trust region constraint.
        :param max_kl_step: KL divergence constraint for each iteration.
        :param learn_std: Whether to learn the standard deviations. Only
         effective if adaptive_std is False. If adaptive_std is True, this
         parameter is ignored, and the weights for the std network are always
         learned.
        :param adaptive_std: Whether to make the std a function of the states.
        :param std_share_network: Whether to use the same network as the mean.
        :param std_hidden_sizes: Number of hidden units of each layer of the
         std network. Only used if `adaptive_std` is True. It defaults to the
         same architecture as the mean network.
        :param std_nonlinearity: Non-linearity used for each layer of the std
         network. Only used if `adaptive_std` is True.
        """
        Parameterized.__init__(self)
        Serializable.quick_init(self, locals())
        self._mean_network_name = "mean_network"
        self._std_network_name = "std_network"

        with tf.variable_scope(name):
            if optimizer_args is None:
                optimizer_args = dict()

            if optimizer is None:
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
                else:
                    optimizer = LbfgsOptimizer(**optimizer_args)
            else:
                optimizer = optimizer(**optimizer_args)

            self._optimizer = optimizer
            self._subsample_factor = subsample_factor

            if mean_network is None:
                if std_share_network:
                    mean_network = MLP(
                        name="mean_network",
                        input_shape=input_shape,
                        output_dim=2 * output_dim,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=hidden_nonlinearity,
                        output_nonlinearity=None,
                    )
                    l_mean = L.SliceLayer(
                        mean_network.output_layer,
                        slice(output_dim),
                        name="mean_slice",
                    )
                else:
                    mean_network = MLP(
                        name="mean_network",
                        input_shape=input_shape,
                        output_dim=output_dim,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=hidden_nonlinearity,
                        output_nonlinearity=None,
                    )
                    l_mean = mean_network.output_layer

            if adaptive_std:
                l_log_std = MLP(
                    name="log_std_network",
                    input_shape=input_shape,
                    input_var=mean_network.input_layer.input_var,
                    output_dim=output_dim,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_nonlinearity,
                    output_nonlinearity=None,
                ).output_layer
            elif std_share_network:
                l_log_std = L.SliceLayer(
                    mean_network.output_layer,
                    slice(output_dim, 2 * output_dim),
                    name="log_std_slice",
                )
            else:
                l_log_std = L.ParamLayer(
                    mean_network.input_layer,
                    num_units=output_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )

            LayersPowered.__init__(self, [l_mean, l_log_std])

            xs_var = mean_network.input_layer.input_var
            ys_var = tf.placeholder(dtype=tf.float32,
                                    name="ys",
                                    shape=(None, output_dim))
            old_means_var = tf.placeholder(dtype=tf.float32,
                                           name="ys",
                                           shape=(None, output_dim))
            old_log_stds_var = tf.placeholder(dtype=tf.float32,
                                              name="old_log_stds",
                                              shape=(None, output_dim))

            x_mean_var = tf.Variable(
                np.zeros((1, ) + input_shape, dtype=np.float32),
                name="x_mean",
            )
            x_std_var = tf.Variable(
                np.ones((1, ) + input_shape, dtype=np.float32),
                name="x_std",
            )
            y_mean_var = tf.Variable(
                np.zeros((1, output_dim), dtype=np.float32),
                name="y_mean",
            )
            y_std_var = tf.Variable(
                np.ones((1, output_dim), dtype=np.float32),
                name="y_std",
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var
            normalized_ys_var = (ys_var - y_mean_var) / y_std_var

            with tf.name_scope(self._mean_network_name,
                               values=[normalized_xs_var]):
                normalized_means_var = L.get_output(
                    l_mean, {mean_network.input_layer: normalized_xs_var})
            with tf.name_scope(self._std_network_name,
                               values=[normalized_xs_var]):
                normalized_log_stds_var = L.get_output(
                    l_log_std, {mean_network.input_layer: normalized_xs_var})

            means_var = normalized_means_var * y_std_var + y_mean_var
            log_stds_var = normalized_log_stds_var + tf.log(y_std_var)

            normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
            normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)

            dist = self._dist = DiagonalGaussian(output_dim)

            normalized_dist_info_vars = dict(mean=normalized_means_var,
                                             log_std=normalized_log_stds_var)

            mean_kl = tf.reduce_mean(
                dist.kl_sym(
                    dict(mean=normalized_old_means_var,
                         log_std=normalized_old_log_stds_var),
                    normalized_dist_info_vars,
                ))

            loss = -tf.reduce_mean(
                dist.log_likelihood_sym(normalized_ys_var,
                                        normalized_dist_info_vars))

            self._f_predict = tensor_utils.compile_function([xs_var],
                                                            means_var)
            self._f_pdists = tensor_utils.compile_function(
                [xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[
                    normalized_means_var, normalized_log_stds_var
                ],
            )

            if use_trust_region:
                optimizer_args["leq_constraint"] = (mean_kl, max_kl_step)
                optimizer_args["inputs"] = [
                    xs_var, ys_var, old_means_var, old_log_stds_var
                ]
            else:
                optimizer_args["inputs"] = [xs_var, ys_var]

            self._optimizer.update_opt(**optimizer_args)

            self._use_trust_region = use_trust_region
            self._name = name

            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var

            # Optionally create assign operations for normalization
            if self._normalize_inputs:
                self._x_mean_var_ph = tf.placeholder(
                    shape=(1, ) + input_shape,
                    dtype=tf.float32,
                )
                self._x_std_var_ph = tf.placeholder(
                    shape=(1, ) + input_shape,
                    dtype=tf.float32,
                )
                self._assign_x_mean = tf.assign(self._x_mean_var,
                                                self._x_mean_var_ph)
                self._assign_x_std = tf.assign(self._x_std_var,
                                               self._x_std_var_ph)
            if self._normalize_outputs:
                self._y_mean_var_ph = tf.placeholder(
                    shape=(1, output_dim),
                    dtype=tf.float32,
                )
                self._y_std_var_ph = tf.placeholder(
                    shape=(1, output_dim),
                    dtype=tf.float32,
                )
                self._assign_y_mean = tf.assign(self._y_mean_var,
                                                self._y_mean_var_ph)
                self._assign_y_std = tf.assign(self._y_std_var,
                                               self._y_std_var_ph)
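
The normalization machinery above is easy to check in isolation. Below is a minimal NumPy sketch (not garage code; all names are hypothetical) of the whitening applied around the mean/std networks: inputs and targets are standardized, the network predicts in normalized space, and the predicted mean and log-std are mapped back to the original scale, mirroring means_var and log_stds_var.

import numpy as np

# Toy stand-ins for the regressor's training data.
xs = np.random.randn(100, 4) * 3.0 + 1.0
ys = np.random.randn(100, 2) * 0.5 - 2.0

# The statistics stored in x_mean_var / x_std_var / y_mean_var / y_std_var.
x_mean, x_std = xs.mean(axis=0, keepdims=True), xs.std(axis=0, keepdims=True)
y_mean, y_std = ys.mean(axis=0, keepdims=True), ys.std(axis=0, keepdims=True)

normalized_xs = (xs - x_mean) / x_std  # what the networks actually see

# Pretend the networks predicted these values in normalized space.
normalized_means = np.zeros((100, 2))
normalized_log_stds = np.zeros((100, 2))

# De-normalization, as in means_var / log_stds_var above.
means = normalized_means * y_std + y_mean        # equals y_mean here
log_stds = normalized_log_stds + np.log(y_std)   # stds scale with y_std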
Code Example #2
File: gaussian_mlp_policy.py Project: gntoni/garage
    def __init__(self,
                 env_spec,
                 name=None,
                 hidden_sizes=(32, 32),
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_hidden_sizes=(32, 32),
                 min_std=1e-6,
                 std_hidden_nonlinearity=tf.nn.tanh,
                 hidden_nonlinearity=tf.nn.tanh,
                 output_nonlinearity=None,
                 mean_network=None,
                 std_network=None,
                 std_parametrization='exp'):
        """
        :param env_spec:
        :param hidden_sizes: list of sizes for the fully-connected hidden
        layers
        :param learn_std: Is std trainable
        :param init_std: Initial std
        :param adaptive_std:
        :param std_share_network:
        :param std_hidden_sizes: list of sizes for the fully-connected layers
         for std
        :param min_std: whether to make sure that the std is at least some
         threshold value, to avoid numerical issues
        :param std_hidden_nonlinearity:
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There
         are a few options:
            - exp: the logarithm of the std will be stored, and applied a
             exponential transformation
            - softplus: the std will be computed as log(1+exp(x))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)
        self.name = name
        self._mean_network_name = "mean_network"
        self._std_network_name = "std_network"

        with tf.variable_scope(name, "GaussianMLPPolicy"):

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            # create network
            if mean_network is None:
                if std_share_network:
                    if std_parametrization == "exp":
                        init_std_param = np.log(init_std)
                    elif std_parametrization == "softplus":
                        init_std_param = np.log(np.exp(init_std) - 1)
                    else:
                        raise NotImplementedError
                    init_b = tf.constant_initializer(init_std_param)
                    with tf.variable_scope(self._mean_network_name):
                        mean_network = MLP(
                            name="mlp",
                            input_shape=(obs_dim, ),
                            output_dim=2 * action_dim,
                            hidden_sizes=hidden_sizes,
                            hidden_nonlinearity=hidden_nonlinearity,
                            output_nonlinearity=output_nonlinearity,
                            output_b_init=init_b,
                        )
                        l_mean = L.SliceLayer(
                            mean_network.output_layer,
                            slice(action_dim),
                            name="mean_slice",
                        )
                else:
                    mean_network = MLP(
                        name=self._mean_network_name,
                        input_shape=(obs_dim, ),
                        output_dim=action_dim,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=hidden_nonlinearity,
                        output_nonlinearity=output_nonlinearity,
                    )
                    l_mean = mean_network.output_layer
            self._mean_network = mean_network

            obs_var = mean_network.input_layer.input_var

            if std_network is not None:
                l_std_param = std_network.output_layer
            else:
                if adaptive_std:
                    std_network = MLP(
                        name=self._std_network_name,
                        input_shape=(obs_dim, ),
                        input_layer=mean_network.input_layer,
                        output_dim=action_dim,
                        hidden_sizes=std_hidden_sizes,
                        hidden_nonlinearity=std_hidden_nonlinearity,
                        output_nonlinearity=None,
                    )
                    l_std_param = std_network.output_layer
                elif std_share_network:
                    with tf.variable_scope(self._std_network_name):
                        l_std_param = L.SliceLayer(
                            mean_network.output_layer,
                            slice(action_dim, 2 * action_dim),
                            name="std_slice",
                        )
                else:
                    if std_parametrization == 'exp':
                        init_std_param = np.log(init_std)
                    elif std_parametrization == 'softplus':
                        init_std_param = np.log(np.exp(init_std) - 1)
                    else:
                        raise NotImplementedError
                    with tf.variable_scope(self._std_network_name):
                        l_std_param = L.ParamLayer(
                            mean_network.input_layer,
                            num_units=action_dim,
                            param=tf.constant_initializer(init_std_param),
                            name="output_std_param",
                            trainable=learn_std,
                        )

            self.std_parametrization = std_parametrization

            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
            else:
                raise NotImplementedError

            self.min_std_param = min_std_param

            self._l_mean = l_mean
            self._l_std_param = l_std_param

            self._dist = DiagonalGaussian(action_dim)

            LayersPowered.__init__(self, [l_mean, l_std_param])
            super(GaussianMLPPolicy, self).__init__(env_spec)

            dist_info_sym = self.dist_info_sym(
                mean_network.input_layer.input_var, dict())
            mean_var = tf.identity(dist_info_sym["mean"], name="mean")
            log_std_var = tf.identity(dist_info_sym["log_std"],
                                      name="standard_dev")

            self._f_dist = tensor_utils.compile_function(
                inputs=[obs_var],
                outputs=[mean_var, log_std_var],
            )
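
The two std parametrizations store different quantities for the same initial std; the inverse transforms used in the branches above can be checked directly. A quick sketch (plain NumPy, hypothetical names):

import numpy as np

init_std = 1.0

# 'exp': store log(std); recover the std with exp.
param_exp = np.log(init_std)
assert np.isclose(np.exp(param_exp), init_std)

# 'softplus': store the softplus-inverse; recover with log(1 + exp(x)).
param_softplus = np.log(np.exp(init_std) - 1)
assert np.isclose(np.log(1 + np.exp(param_softplus)), init_std)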
Code Example #3
    def __init__(
        self,
        env_spec,
        name="GaussianGRUPolicy",
        hidden_dim=32,
        feature_network=None,
        state_include_action=True,
        hidden_nonlinearity=tf.tanh,
        gru_layer_cls=L.GRULayer,
        learn_std=True,
        init_std=1.0,
        output_nonlinearity=None,
        std_share_network=False,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        assert isinstance(env_spec.action_space, Box)

        self._mean_network_name = "mean_network"
        self._std_network_name = "std_network"

        with tf.variable_scope(name, "GaussianGRUPolicy"):
            Serializable.quick_init(self, locals())
            super(GaussianGRUPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(shape=(None, None, input_dim), name="input")

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name="reshape_feature",
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([
                            tf.shape(input)[0],
                            tf.shape(input)[1], feature_dim
                        ])),
                    shape_op=lambda _, input_shape:
                    (input_shape[0], input_shape[1], feature_dim))

            if std_share_network:
                mean_network = GRUNetwork(
                    input_shape=(feature_dim, ),
                    input_layer=l_feature,
                    output_dim=2 * action_dim,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    gru_layer_cls=gru_layer_cls,
                    name="gru_mean_network")

                l_mean = L.SliceLayer(mean_network.output_layer,
                                      slice(action_dim),
                                      name="mean_slice")

                l_step_mean = L.SliceLayer(mean_network.step_output_layer,
                                           slice(action_dim),
                                           name="step_mean_slice")

                l_log_std = L.SliceLayer(mean_network.output_layer,
                                         slice(action_dim, 2 * action_dim),
                                         name="log_std_slice")

                l_step_log_std = L.SliceLayer(mean_network.step_output_layer,
                                              slice(action_dim,
                                                    2 * action_dim),
                                              name="step_log_std_slice")
            else:
                mean_network = GRUNetwork(
                    input_shape=(feature_dim, ),
                    input_layer=l_feature,
                    output_dim=action_dim,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    output_nonlinearity=output_nonlinearity,
                    gru_layer_cls=gru_layer_cls,
                    name="gru_mean_network")

                l_mean = mean_network.output_layer

                l_step_mean = mean_network.step_output_layer

                l_log_std = L.ParamLayer(
                    mean_network.input_layer,
                    num_units=action_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    name="output_log_std",
                    trainable=learn_std,
                )

                l_step_log_std = L.ParamLayer(
                    mean_network.step_input_layer,
                    num_units=action_dim,
                    param=l_log_std.param,
                    name="step_output_log_std",
                    trainable=learn_std,
                )

            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action

            flat_input_var = tf.placeholder(dtype=tf.float32,
                                            shape=(None, input_dim),
                                            name="flat_input")
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(
                    l_flat_feature,
                    {feature_network.input_layer: flat_input_var})

            with tf.name_scope(self._mean_network_name):
                out_step_mean, out_step_hidden_mean = L.get_output(
                    [l_step_mean, mean_network.step_hidden_layer],
                    {mean_network.step_input_layer: feature_var})
                out_step_mean = tf.identity(out_step_mean, "step_mean")
                out_step_hidden_mean = tf.identity(out_step_hidden_mean,
                                                   "step_hidden_mean")

            with tf.name_scope(self._std_network_name):
                out_step_log_std = L.get_output(
                    l_step_log_std,
                    {mean_network.step_input_layer: feature_var})
                out_step_log_std = tf.identity(out_step_log_std,
                                               "step_log_std")

            self.f_step_mean_std = tensor_utils.compile_function([
                flat_input_var,
                mean_network.step_prev_state_layer.input_var,
            ], [out_step_mean, out_step_log_std, out_step_hidden_mean])

            self.l_mean = l_mean
            self.l_log_std = l_log_std

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim

            self.prev_actions = None
            self.prev_hiddens = None
            self.dist = RecurrentDiagonalGaussian(action_dim)
            self.name = name

            out_layers = [l_mean, l_log_std, l_step_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
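
When std_share_network is True, a single GRU head emits 2 * action_dim units per step and the SliceLayers split it into means and log-stds. The split itself is just array slicing, as this NumPy sketch (hypothetical names) shows:

import numpy as np

action_dim, batch, time = 3, 5, 7

# One shared head emitting means and log-stds side by side.
net_out = np.random.randn(batch, time, 2 * action_dim)

mean = net_out[..., :action_dim]                   # slice(action_dim)
log_std = net_out[..., action_dim:2 * action_dim]  # slice(action_dim, 2 * action_dim)
assert mean.shape == log_std.shape == (batch, time, action_dim)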
Code Example #4
File: network.py Project: xht033/garage
    def __init__(self,
                 input_shape,
                 extra_input_shape,
                 output_dim,
                 hidden_sizes,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 name=None,
                 extra_hidden_sizes=None,
                 hidden_w_init=ly.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer(),
                 output_w_init=ly.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer(),
                 hidden_nonlinearity=tf.nn.relu,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None):
        Serializable.quick_init(self, locals())

        if extra_hidden_sizes is None:
            extra_hidden_sizes = []

        with tf.variable_scope(name, 'ConvMergeNetwork'):

            input_flat_dim = np.prod(input_shape)
            extra_input_flat_dim = np.prod(extra_input_shape)
            total_input_flat_dim = input_flat_dim + extra_input_flat_dim

            if input_layer is None:
                l_in = ly.InputLayer(shape=(None, total_input_flat_dim),
                                     input_var=input_var,
                                     name='input')
            else:
                l_in = input_layer

            l_conv_in = ly.reshape(ly.SliceLayer(l_in,
                                                 indices=slice(input_flat_dim),
                                                 name='conv_slice'),
                                   ([0], ) + input_shape,
                                   name='conv_reshaped')
            l_extra_in = ly.reshape(ly.SliceLayer(l_in,
                                                  indices=slice(
                                                      input_flat_dim, None),
                                                  name='extra_slice'),
                                    ([0], ) + extra_input_shape,
                                    name='extra_reshaped')

            l_conv_hid = l_conv_in
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_conv_hid = ly.Conv2DLayer(
                    l_conv_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name='conv_hidden_%d' % idx,
                )

            l_extra_hid = l_extra_in
            for idx, hidden_size in enumerate(extra_hidden_sizes):
                l_extra_hid = ly.DenseLayer(
                    l_extra_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name='extra_hidden_%d' % idx,
                    w=hidden_w_init,
                    b=hidden_b_init,
                )

            l_joint_hid = ly.concat(
                [ly.flatten(l_conv_hid, name='conv_hidden_flat'), l_extra_hid],
                name='joint_hidden')

            for idx, hidden_size in enumerate(hidden_sizes):
                l_joint_hid = ly.DenseLayer(
                    l_joint_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name='joint_hidden_%d' % idx,
                    w=hidden_w_init,
                    b=hidden_b_init,
                )
            l_out = ly.DenseLayer(
                l_joint_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name='output',
                w=output_w_init,
                b=output_b_init,
            )
            self._l_in = l_in
            self._l_out = l_out

            LayersPowered.__init__(self, [l_out], input_layers=[l_in])
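
ConvMergeNetwork accepts one flat input vector and internally splits it into an image part (reshaped for the conv stack) and an extra part (fed to dense layers). The slicing and reshaping, in a minimal NumPy sketch with hypothetical shapes:

import numpy as np

input_shape = (16, 16, 3)   # image part (hypothetical)
extra_input_shape = (10, )  # extra vector part (hypothetical)

input_flat_dim = int(np.prod(input_shape))
extra_input_flat_dim = int(np.prod(extra_input_shape))

batch = 4
flat_in = np.random.randn(batch, input_flat_dim + extra_input_flat_dim)

# conv_slice / conv_reshaped
l_conv_in = flat_in[:, :input_flat_dim].reshape((batch, ) + input_shape)
# extra_slice / extra_reshaped
l_extra_in = flat_in[:, input_flat_dim:].reshape((batch, ) + extra_input_shape)
assert l_conv_in.shape == (batch, 16, 16, 3) and l_extra_in.shape == (batch, 10)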
Code Example #5
File: network.py Project: xht033/garage
    def __init__(self,
                 input_shape,
                 output_dim,
                 hidden_dim,
                 name=None,
                 hidden_nonlinearity=tf.nn.relu,
                 output_w_init=ly.XavierUniformInitializer(),
                 recurrent_nonlinearity=tf.nn.sigmoid,
                 recurrent_w_x_init=ly.XavierUniformInitializer(),
                 recurrent_w_h_init=ly.OrthogonalInitializer(),
                 lstm_layer_cls=ly.LSTMLayer,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None,
                 forget_bias=1.0,
                 use_peepholes=False,
                 layer_args=None):
        with tf.variable_scope(name, 'LSTMNetwork'):
            if input_layer is None:
                l_in = ly.InputLayer(shape=(None, None) + input_shape,
                                     input_var=input_var,
                                     name='input')
            else:
                l_in = input_layer
            l_step_input = ly.InputLayer(shape=(None, ) + input_shape,
                                         name='step_input')
            # contains previous hidden and cell state
            l_step_prev_state = ly.InputLayer(shape=(None, hidden_dim * 2),
                                              name='step_prev_state')
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in,
                                    num_units=hidden_dim,
                                    hidden_nonlinearity=hidden_nonlinearity,
                                    gate_nonlinearity=recurrent_nonlinearity,
                                    hidden_init_trainable=False,
                                    name='lstm_layer',
                                    forget_bias=forget_bias,
                                    cell_init_trainable=False,
                                    w_x_init=recurrent_w_x_init,
                                    w_h_init=recurrent_w_h_init,
                                    use_peepholes=use_peepholes,
                                    **layer_args)
            l_lstm_flat = ly.ReshapeLayer(l_lstm,
                                          shape=(-1, hidden_dim),
                                          name='lstm_flat')
            l_output_flat = ly.DenseLayer(l_lstm_flat,
                                          num_units=output_dim,
                                          nonlinearity=output_nonlinearity,
                                          w=output_w_init,
                                          name='output_flat')
            l_output = ly.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input: tf.reshape(
                    flat_output,
                    tf.stack(
                        (tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name='output')
            l_step_state = l_lstm.get_step_layer(l_step_input,
                                                 l_step_prev_state,
                                                 name='step_state')
            l_step_hidden = ly.SliceLayer(l_step_state,
                                          indices=slice(hidden_dim),
                                          name='step_hidden')
            l_step_cell = ly.SliceLayer(l_step_state,
                                        indices=slice(hidden_dim, None),
                                        name='step_cell')
            l_step_output = ly.DenseLayer(l_step_hidden,
                                          num_units=output_dim,
                                          nonlinearity=output_nonlinearity,
                                          w=l_output_flat.w,
                                          b=l_output_flat.b,
                                          name='step_output')

            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
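
Two details above are worth unpacking: the sequence output is computed by flattening (batch, time) into one axis, applying a dense layer, and reshaping back via the OpLayer; and step_output reuses the same w and b, so single-step and full-sequence outputs agree. A NumPy sketch (hypothetical names) of that equivalence:

import numpy as np

batch, time, hidden_dim, output_dim = 4, 6, 32, 2
w = np.random.randn(hidden_dim, output_dim)  # shared by output_flat and step_output
b = np.random.randn(output_dim)

lstm_out = np.random.randn(batch, time, hidden_dim)

# output_flat: dense layer over the flattened (batch * time) axis...
flat = lstm_out.reshape(-1, hidden_dim) @ w + b
# ...then the OpLayer reshapes back to (batch, time, output_dim).
seq_out = flat.reshape(batch, time, output_dim)

# step_output: the same w / b applied to one step gives the same values.
step_out = lstm_out[:, 0, :] @ w + b
assert np.allclose(step_out, seq_out[:, 0, :])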
Code Example #6
    def __init__(self,
                 input_shape,
                 output_dim,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 hidden_sizes,
                 hidden_nonlinearity=tf.nn.tanh,
                 output_nonlinearity=None,
                 name='GaussianConvRegressor',
                 mean_network=None,
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_conv_filters=(),
                 std_conv_filter_sizes=(),
                 std_conv_strides=(),
                 std_conv_pads=(),
                 std_hidden_sizes=(),
                 std_hidden_nonlinearity=None,
                 std_output_nonlinearity=None,
                 normalize_inputs=True,
                 normalize_outputs=True,
                 subsample_factor=1.0,
                 optimizer=None,
                 optimizer_args=None,
                 use_trust_region=True,
                 max_kl_step=0.01):
        Parameterized.__init__(self)
        Serializable.quick_init(self, locals())
        self._mean_network_name = 'mean_network'
        self._std_network_name = 'std_network'

        with tf.compat.v1.variable_scope(name):
            if optimizer_args is None:
                optimizer_args = dict()

            if optimizer is None:
                if use_trust_region:
                    optimizer = PenaltyLbfgsOptimizer(**optimizer_args)
                else:
                    optimizer = LbfgsOptimizer(**optimizer_args)
            else:
                optimizer = optimizer(**optimizer_args)

            self._optimizer = optimizer
            self._subsample_factor = subsample_factor

            if mean_network is None:
                if std_share_network:
                    b = np.concatenate(
                        [
                            np.zeros(output_dim),
                            np.full(output_dim, np.log(init_std))
                        ],
                        axis=0)  # yapf: disable
                    b = tf.constant_initializer(b)
                    mean_network = ConvNetwork(
                        name=self._mean_network_name,
                        input_shape=input_shape,
                        output_dim=2 * output_dim,
                        conv_filters=conv_filters,
                        conv_filter_sizes=conv_filter_sizes,
                        conv_strides=conv_strides,
                        conv_pads=conv_pads,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=hidden_nonlinearity,
                        output_nonlinearity=output_nonlinearity,
                        output_b_init=b)
                    l_mean = layers.SliceLayer(
                        mean_network.output_layer,
                        slice(output_dim),
                        name='mean_slice',
                    )
                else:
                    mean_network = ConvNetwork(
                        name=self._mean_network_name,
                        input_shape=input_shape,
                        output_dim=output_dim,
                        conv_filters=conv_filters,
                        conv_filter_sizes=conv_filter_sizes,
                        conv_strides=conv_strides,
                        conv_pads=conv_pads,
                        hidden_sizes=hidden_sizes,
                        hidden_nonlinearity=hidden_nonlinearity,
                        output_nonlinearity=output_nonlinearity)
                    l_mean = mean_network.output_layer

            if adaptive_std:
                l_log_std = ConvNetwork(
                    name=self._std_network_name,
                    input_shape=input_shape,
                    output_dim=output_dim,
                    conv_filters=std_conv_filters,
                    conv_filter_sizes=std_conv_filter_sizes,
                    conv_strides=std_conv_strides,
                    conv_pads=std_conv_pads,
                    hidden_sizes=std_hidden_sizes,
                    hidden_nonlinearity=std_hidden_nonlinearity,
                    output_nonlinearity=std_output_nonlinearity,
                    output_b_init=tf.constant_initializer(np.log(init_std)),
                ).output_layer
            elif std_share_network:
                l_log_std = layers.SliceLayer(
                    mean_network.output_layer,
                    slice(output_dim, 2 * output_dim),
                    name='log_std_slice',
                )
            else:
                l_log_std = layers.ParamLayer(
                    mean_network.input_layer,
                    num_units=output_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    trainable=learn_std,
                    name=self._std_network_name,
                )

            LayersPowered.__init__(self, [l_mean, l_log_std])

            xs_var = mean_network.input_layer.input_var
            ys_var = tf.compat.v1.placeholder(
                dtype=tf.float32, name='ys', shape=(None, output_dim))
            old_means_var = tf.compat.v1.placeholder(
                dtype=tf.float32, name='old_means', shape=(None, output_dim))
            old_log_stds_var = tf.compat.v1.placeholder(
                dtype=tf.float32,
                name='old_log_stds',
                shape=(None, output_dim))

            x_mean_var = tf.Variable(
                np.zeros((1, np.prod(input_shape)), dtype=np.float32),
                name='x_mean',
            )
            x_std_var = tf.Variable(
                np.ones((1, np.prod(input_shape)), dtype=np.float32),
                name='x_std',
            )
            y_mean_var = tf.Variable(
                np.zeros((1, output_dim), dtype=np.float32),
                name='y_mean',
            )
            y_std_var = tf.Variable(
                np.ones((1, output_dim), dtype=np.float32),
                name='y_std',
            )

            normalized_xs_var = (xs_var - x_mean_var) / x_std_var
            normalized_ys_var = (ys_var - y_mean_var) / y_std_var

            with tf.name_scope(
                    self._mean_network_name, values=[normalized_xs_var]):
                normalized_means_var = layers.get_output(
                    l_mean, {mean_network.input_layer: normalized_xs_var})
            with tf.name_scope(
                    self._std_network_name, values=[normalized_xs_var]):
                normalized_log_stds_var = layers.get_output(
                    l_log_std, {mean_network.input_layer: normalized_xs_var})

            means_var = normalized_means_var * y_std_var + y_mean_var
            log_stds_var = normalized_log_stds_var + tf.math.log(y_std_var)

            normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
            normalized_old_log_stds_var = (
                old_log_stds_var - tf.math.log(y_std_var))

            dist = self._dist = DiagonalGaussian(output_dim)

            normalized_dist_info_vars = dict(
                mean=normalized_means_var, log_std=normalized_log_stds_var)

            mean_kl = tf.reduce_mean(
                dist.kl_sym(
                    dict(
                        mean=normalized_old_means_var,
                        log_std=normalized_old_log_stds_var),
                    normalized_dist_info_vars,
                ))

            loss = -tf.reduce_mean(
                dist.log_likelihood_sym(normalized_ys_var,
                                        normalized_dist_info_vars))

            self._f_predict = tensor_utils.compile_function([xs_var],
                                                            means_var)
            self._f_pdists = tensor_utils.compile_function(
                [xs_var], [means_var, log_stds_var])
            self._l_mean = l_mean
            self._l_log_std = l_log_std

            optimizer_args = dict(
                loss=loss,
                target=self,
                network_outputs=[
                    normalized_means_var, normalized_log_stds_var
                ],
            )

            if use_trust_region:
                optimizer_args['leq_constraint'] = (mean_kl, max_kl_step)
                optimizer_args['inputs'] = [
                    xs_var, ys_var, old_means_var, old_log_stds_var
                ]
            else:
                optimizer_args['inputs'] = [xs_var, ys_var]

            self._optimizer.update_opt(**optimizer_args)

            self._use_trust_region = use_trust_region
            self._name = name

            self._normalize_inputs = normalize_inputs
            self._normalize_outputs = normalize_outputs
            self._mean_network = mean_network
            self._x_mean_var = x_mean_var
            self._x_std_var = x_std_var
            self._y_mean_var = y_mean_var
            self._y_std_var = y_std_var
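
The trust-region option constrains the mean KL divergence between the old and new (normalized) output distributions. For the diagonal Gaussians used here that KL has a closed form; the sketch below (plain NumPy, hypothetical names) implements the standard formula, which is what dist.kl_sym evaluates symbolically:

import numpy as np

def diag_gaussian_kl(mean_old, log_std_old, mean_new, log_std_new):
    """KL(old || new) for diagonal Gaussians, summed over dimensions."""
    var_old, var_new = np.exp(2 * log_std_old), np.exp(2 * log_std_new)
    return np.sum(
        log_std_new - log_std_old
        + (var_old + (mean_old - mean_new) ** 2) / (2.0 * var_new)
        - 0.5,
        axis=-1)

# The KL of a distribution with itself is zero.
mu, ls = np.zeros((5, 2)), np.zeros((5, 2))
assert np.allclose(diag_gaussian_kl(mu, ls, mu, ls), 0.0)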
Code Example #7
File: gaussian_lstm_policy.py Project: wyjw/garage
    def __init__(
        self,
        env_spec,
        name='GaussianLSTMPolicy',
        hidden_dim=32,
        hidden_nonlinearity=tf.tanh,
        recurrent_nonlinearity=tf.nn.sigmoid,
        recurrent_w_x_init=L.XavierUniformInitializer(),
        recurrent_w_h_init=L.OrthogonalInitializer(),
        output_nonlinearity=None,
        output_w_init=L.XavierUniformInitializer(),
        feature_network=None,
        state_include_action=True,
        learn_std=True,
        init_std=1.0,
        lstm_layer_cls=L.LSTMLayer,
        use_peepholes=False,
        std_share_network=False,
    ):
        """
        :param env_spec: A spec for the env.
        :param hidden_dim: dimension of hidden layer
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :return:
        """
        assert isinstance(env_spec.action_space, akro.Box)

        self._mean_network_name = 'mean_network'
        self._std_network_name = 'std_network'
        with tf.variable_scope(name, 'GaussianLSTMPolicy'):
            Serializable.quick_init(self, locals())
            super(GaussianLSTMPolicy, self).__init__(env_spec)

            obs_dim = env_spec.observation_space.flat_dim
            action_dim = env_spec.action_space.flat_dim

            if state_include_action:
                input_dim = obs_dim + action_dim
            else:
                input_dim = obs_dim

            l_input = L.InputLayer(shape=(None, None, input_dim), name='input')

            if feature_network is None:
                feature_dim = input_dim
                l_flat_feature = None
                l_feature = l_input
            else:
                feature_dim = feature_network.output_layer.output_shape[-1]
                l_flat_feature = feature_network.output_layer
                l_feature = L.OpLayer(
                    l_flat_feature,
                    extras=[l_input],
                    name='reshape_feature',
                    op=lambda flat_feature, input: tf.reshape(
                        flat_feature,
                        tf.stack([
                            tf.shape(input)[0],
                            tf.shape(input)[1], feature_dim
                        ])),
                    shape_op=lambda _, input_shape:
                    (input_shape[0], input_shape[1], feature_dim))

            if std_share_network:
                mean_network = LSTMNetwork(
                    input_shape=(feature_dim, ),
                    input_layer=l_feature,
                    output_dim=2 * action_dim,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    recurrent_nonlinearity=recurrent_nonlinearity,
                    recurrent_w_x_init=recurrent_w_x_init,
                    recurrent_w_h_init=recurrent_w_h_init,
                    output_nonlinearity=output_nonlinearity,
                    output_w_init=output_w_init,
                    lstm_layer_cls=lstm_layer_cls,
                    name='lstm_mean_network',
                    use_peepholes=use_peepholes,
                )

                l_mean = L.SliceLayer(
                    mean_network.output_layer,
                    slice(action_dim),
                    name='mean_slice',
                )

                l_step_mean = L.SliceLayer(
                    mean_network.step_output_layer,
                    slice(action_dim),
                    name='step_mean_slice',
                )

                l_log_std = L.SliceLayer(
                    mean_network.output_layer,
                    slice(action_dim, 2 * action_dim),
                    name='log_std_slice',
                )

                l_step_log_std = L.SliceLayer(
                    mean_network.step_output_layer,
                    slice(action_dim, 2 * action_dim),
                    name='step_log_std_slice',
                )
            else:
                mean_network = LSTMNetwork(
                    input_shape=(feature_dim, ),
                    input_layer=l_feature,
                    output_dim=action_dim,
                    hidden_dim=hidden_dim,
                    hidden_nonlinearity=hidden_nonlinearity,
                    recurrent_nonlinearity=recurrent_nonlinearity,
                    recurrent_w_x_init=recurrent_w_x_init,
                    recurrent_w_h_init=recurrent_w_h_init,
                    output_nonlinearity=output_nonlinearity,
                    output_w_init=output_w_init,
                    lstm_layer_cls=lstm_layer_cls,
                    name='lstm_mean_network',
                    use_peepholes=use_peepholes,
                )

                l_mean = mean_network.output_layer

                l_step_mean = mean_network.step_output_layer

                l_log_std = L.ParamLayer(
                    mean_network.input_layer,
                    num_units=action_dim,
                    param=tf.constant_initializer(np.log(init_std)),
                    name='output_log_std',
                    trainable=learn_std,
                )

                l_step_log_std = L.ParamLayer(
                    mean_network.step_input_layer,
                    num_units=action_dim,
                    param=l_log_std.param,
                    name='step_output_log_std',
                    trainable=learn_std,
                )

            self.mean_network = mean_network
            self.feature_network = feature_network
            self.l_input = l_input
            self.state_include_action = state_include_action
            self.name = name

            flat_input_var = tf.placeholder(dtype=tf.float32,
                                            shape=(None, input_dim),
                                            name='flat_input')
            if feature_network is None:
                feature_var = flat_input_var
            else:
                feature_var = L.get_output(
                    l_flat_feature,
                    {feature_network.input_layer: flat_input_var})

            with tf.name_scope(self._mean_network_name, values=[feature_var]):
                (out_step_mean, out_step_hidden, out_mean_cell) = L.get_output(
                    [
                        l_step_mean, mean_network.step_hidden_layer,
                        mean_network.step_cell_layer
                    ], {mean_network.step_input_layer: feature_var})
                out_step_mean = tf.identity(out_step_mean, 'step_mean')
                out_step_hidden = tf.identity(out_step_hidden, 'step_hidden')
                out_mean_cell = tf.identity(out_mean_cell, 'mean_cell')

            with tf.name_scope(self._std_network_name, values=[feature_var]):
                out_step_log_std = L.get_output(
                    l_step_log_std,
                    {mean_network.step_input_layer: feature_var})
                out_step_log_std = tf.identity(out_step_log_std,
                                               'step_log_std')

            self.f_step_mean_std = tensor_utils.compile_function([
                flat_input_var,
                mean_network.step_prev_state_layer.input_var,
            ], [
                out_step_mean, out_step_log_std, out_step_hidden, out_mean_cell
            ])

            self.l_mean = l_mean
            self.l_log_std = l_log_std

            self.input_dim = input_dim
            self.action_dim = action_dim
            self.hidden_dim = hidden_dim

            self.prev_actions = None
            self.prev_hiddens = None
            self.prev_cells = None
            self.dist = RecurrentDiagonalGaussian(action_dim)

            out_layers = [l_mean, l_log_std]
            if feature_network is not None:
                out_layers.append(feature_network.output_layer)

            LayersPowered.__init__(self, out_layers)
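
At rollout time, f_step_mean_std consumes the flat step input and the previous LSTM state and returns the step mean and log-std, from which an action is sampled by reparametrization. With state_include_action=True the step input concatenates the observation with the previous action. A minimal NumPy sketch of that step (hypothetical names and values):

import numpy as np

obs_dim, action_dim = 4, 2
obs = np.random.randn(obs_dim)
prev_action = np.zeros(action_dim)  # zeros at the start of an episode

# state_include_action=True: the step input is [obs, prev_action].
step_input = np.concatenate([obs, prev_action])  # shape (obs_dim + action_dim,)

# Given the step outputs, sample an action from the diagonal Gaussian.
mean = np.zeros(action_dim)     # stand-in for out_step_mean
log_std = np.zeros(action_dim)  # stand-in for out_step_log_std
action = mean + np.exp(log_std) * np.random.randn(action_dim)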
Code Example #8
    def __init__(self,
                 input_shape,
                 output_dim,
                 hidden_dim,
                 name=None,
                 hidden_nonlinearity=tf.nn.relu,
                 lstm_layer_cls=ly.LSTMLayer,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None,
                 forget_bias=1.0,
                 use_peepholes=False,
                 layer_args=None):
        with tf.variable_scope(name, "LSTMNetwork"):
            if input_layer is None:
                l_in = ly.InputLayer(
                    shape=(None, None) + input_shape,
                    input_var=input_var,
                    name="input")
            else:
                l_in = input_layer
            l_step_input = ly.InputLayer(
                shape=(None, ) + input_shape, name="step_input")
            # contains previous hidden and cell state
            l_step_prev_state = ly.InputLayer(
                shape=(None, hidden_dim * 2), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(
                l_in,
                num_units=hidden_dim,
                hidden_nonlinearity=hidden_nonlinearity,
                hidden_init_trainable=False,
                name="lstm_layer",
                forget_bias=forget_bias,
                cell_init_trainable=False,
                use_peepholes=use_peepholes,
                **layer_args)
            l_lstm_flat = ly.ReshapeLayer(
                l_lstm, shape=(-1, hidden_dim), name="lstm_flat")
            l_output_flat = ly.DenseLayer(
                l_lstm_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat")
            l_output = ly.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input: tf.reshape(
                    flat_output,
                    tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))
                ),
                shape_op=lambda flat_output_shape, l_input_shape: (
                    l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output")
            l_step_state = l_lstm.get_step_layer(
                l_step_input, l_step_prev_state, name="step_state")
            l_step_hidden = ly.SliceLayer(
                l_step_state, indices=slice(hidden_dim), name="step_hidden")
            l_step_cell = ly.SliceLayer(
                l_step_state,
                indices=slice(hidden_dim, None),
                name="step_cell")
            l_step_output = ly.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                w=l_output_flat.w,
                b=l_output_flat.b,
                name="step_output")

            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
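
step_prev_state packs the previous hidden and cell states into one (batch, hidden_dim * 2) tensor, which the two SliceLayers then split back apart. The packing convention, in a short NumPy sketch (hypothetical names):

import numpy as np

hidden_dim, batch = 32, 8

# step_prev_state concatenates [hidden, cell] along the last axis.
prev_state = np.random.randn(batch, hidden_dim * 2)

h = prev_state[:, :hidden_dim]  # slice(hidden_dim)       -> step_hidden
c = prev_state[:, hidden_dim:]  # slice(hidden_dim, None) -> step_cell
assert h.shape == c.shape == (batch, hidden_dim)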