def __init__(self,
                 name,
                 env_spec,
                 hidden_sizes=(32, 32),
                 learn_std=True,
                 init_std=1.0,
                 adaptive_std=False,
                 std_share_network=False,
                 std_hidden_sizes=(32, 32),
                 min_std=1e-6,
                 std_hidden_nonlinearity=tf.nn.tanh,
                 hidden_nonlinearity=tf.nn.tanh,
                 output_nonlinearity=tf.identity,
                 mean_network=None,
                 std_network=None,
                 std_parametrization='exp'):
        """
        :param env_spec: specification of the environment (observation and action spaces)
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: whether the std is trainable
        :param init_std: initial std
        :param adaptive_std: whether to learn a state-dependent std via a separate MLP
        :param std_share_network: whether the std network shares layers with the mean network
        :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
        :param min_std: minimum value the std is clipped to, to avoid numerical issues
        :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std is stored, and an exponential transformation is applied
            - softplus: the std is computed as log(1 + exp(x))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        self.all_param_vals = False

        print('obs_dim ', obs_dim, flush=True)
        # create network
        if mean_network is None:
            self.mean_params = mean_params = self.create_MLP(
                name="mean_network",
                input_shape=(
                    None,
                    obs_dim,
                ),
                output_dim=action_dim,
                hidden_sizes=hidden_sizes,
            )
            input_tensor, mean_tensor = self.forward_MLP(
                'mean_network',
                mean_params,
                n_hidden=len(hidden_sizes),
                input_shape=(obs_dim, ),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                reuse=None  # Needed for batch norm
            )
            # if you want to input your own thing.
            self._forward_mean = lambda x, is_train: self.forward_MLP(
                'mean_network',
                mean_params,
                n_hidden=len(hidden_sizes),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                input_tensor=x,
                is_training=is_train)[1]
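            # The split between create_MLP (which builds the weight variables once) and
            # forward_MLP (which re-applies those same weights to a given input tensor)
            # is what lets the _forward_mean lambda above be evaluated on arbitrary
            # inputs without creating new parameters.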
        else:
            raise NotImplementedError('Chelsea does not support this.')

        if std_network is not None:
            raise NotImplementedError(
                'Minimal Gaussian MLP does not support this.')
        else:
            if adaptive_std:
                # NOTE - this branch isn't tested
                raise NotImplementedError(
                    "Minimal Gaussian MLP doesn't have a tested version of this."
                )
                self.std_params = std_params = self.create_MLP(
                    name="std_network",
                    input_shape=(
                        None,
                        obs_dim,
                    ),
                    output_dim=action_dim,
                    hidden_sizes=std_hidden_sizes,
                )
                # if you want to input your own thing.
                self._forward_std = lambda x: self.forward_MLP(
                    'std_network',
                    std_params,
                    n_hidden=len(hidden_sizes),
                    hidden_nonlinearity=std_hidden_nonlinearity,
                    output_nonlinearity=tf.identity,
                    input_tensor=x)[1]
            else:
                if std_parametrization == 'exp':
                    init_std_param = np.log(init_std)
                elif std_parametrization == 'softplus':
                    init_std_param = np.log(np.exp(init_std) - 1)
                else:
                    raise NotImplementedError
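                # The std below is state-independent (unlike the adaptive_std branch
                # above): make_param_layer creates a single learned parameter vector of
                # size action_dim, and forward_param_layer presumably just returns it
                # (broadcast over the batch) for any input x.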
                self.std_params = make_param_layer(
                    num_units=action_dim,
                    param=tf.constant_initializer(init_std_param),
                    name="output_std_param",
                    trainable=learn_std,
                )
                self._forward_std = lambda x: forward_param_layer(
                    x, self.std_params)

        self.std_parametrization = std_parametrization

        if std_parametrization == 'exp':
            min_std_param = np.log(min_std)
        elif std_parametrization == 'softplus':
            min_std_param = np.log(np.exp(min_std) - 1)
        else:
            raise NotImplementedError

        self.min_std_param = min_std_param

        self._dist = DiagonalGaussian(action_dim)

        self._cached_params = {}

        super(GaussianMLPPolicy, self).__init__(env_spec)

        dist_info_sym = self.dist_info_sym(input_tensor,
                                           dict(),
                                           is_training=False)
        mean_var = dist_info_sym["mean"]
        log_std_var = dist_info_sym["log_std"]

        self._f_dist = tensor_utils.compile_function(
            inputs=[input_tensor],
            outputs=[mean_var, log_std_var],
        )
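# A standalone NumPy sketch (not part of the class above) of the two std
# parametrizations referenced in the docstring: the stored parameter is the inverse
# transform of the desired std, and min_std is mapped into the same raw-parameter
# space before being used as a floor.
import numpy as np

init_std, min_std = 1.0, 1e-6

# 'exp': store log(std); recover the std with exp(param)
param_exp = np.log(init_std)                        # 0.0
assert np.isclose(np.exp(param_exp), init_std)

# 'softplus': store inverse-softplus(std); recover the std with log(1 + exp(param))
param_softplus = np.log(np.exp(init_std) - 1)       # ~0.5413
assert np.isclose(np.log(1 + np.exp(param_softplus)), init_std)

# the same transforms are applied to min_std to obtain min_std_param
min_std_param_exp = np.log(min_std)                    # ~-13.82
min_std_param_softplus = np.log(np.exp(min_std) - 1)   # ~-13.82 (softplus(p) ~= exp(p) for very negative p)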
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=tf.nn.tanh,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=tf.identity,
            mean_network=None,
            std_network=None,
            std_parametrization='exp'
    ):
        """
        :param env_spec: specification of the environment (observation and action spaces)
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: whether the std is trainable
        :param init_std: initial std
        :param adaptive_std: whether to learn a state-dependent std via a separate MLP
        :param std_share_network: whether the std network shares layers with the mean network
        :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
        :param min_std: minimum value the std is clipped to, to avoid numerical issues
        :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std is stored, and an exponential transformation is applied
            - softplus: the std is computed as log(1 + exp(x))
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim

        # create network
        if mean_network is None:
            self.mean_params = mean_params = self.create_MLP(
                name="mean_network",
                input_shape=(None, obs_dim,),
                output_dim=action_dim,
                hidden_sizes=hidden_sizes,
            )
            input_tensor, mean_tensor = self.forward_MLP('mean_network', mean_params, n_hidden=len(hidden_sizes),
                input_shape=(obs_dim,),
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
                reuse=None # Needed for batch norm
            )
            # if you want to input your own thing.
            self._forward_mean = lambda x, is_train: self.forward_MLP('mean_network', mean_params, n_hidden=len(hidden_sizes),
                hidden_nonlinearity=hidden_nonlinearity, output_nonlinearity=output_nonlinearity, input_tensor=x, is_training=is_train)[1]
        else:
            raise NotImplementedError('Chelsea does not support this.')

        if std_network is not None:
            raise NotImplementedError('Minimal Gaussian MLP does not support this.')
        else:
            if adaptive_std:
                # NOTE - this branch isn't tested
                raise NotImplementedError("Minimal Gaussian MLP doesn't have a tested version of this.")
                self.std_params = std_params = self.create_MLP(
                    name="std_network",
                    input_shape=(None, obs_dim,),
                    output_dim=action_dim,
                    hidden_sizes=std_hidden_sizes,
                )
                # if you want to input your own thing.
                self._forward_std = lambda x: self.forward_MLP('std_network', std_params, n_hidden=len(hidden_sizes),
                                                                  hidden_nonlinearity=std_hidden_nonlinearity,
                                                                output_nonlinearity=tf.identity,
                                                                input_tensor=x)[1]
            else:
                if std_parametrization == 'exp':
                    init_std_param = np.log(init_std)
                elif std_parametrization == 'softplus':
                    init_std_param = np.log(np.exp(init_std) - 1)
                else:
                    raise NotImplementedError
                self.std_params = make_param_layer(
                    num_units=action_dim,
                    param=tf.constant_initializer(init_std_param),
                    name="output_std_param",
                    trainable=learn_std,
                )
                self._forward_std = lambda x: forward_param_layer(x, self.std_params)

        self.std_parametrization = std_parametrization

        if std_parametrization == 'exp':
            min_std_param = np.log(min_std)
        elif std_parametrization == 'softplus':
            min_std_param = np.log(np.exp(min_std) - 1)
        else:
            raise NotImplementedError

        self.min_std_param = min_std_param

        self._dist = DiagonalGaussian(action_dim)

        self._cached_params = {}

        super(GaussianMLPPolicy, self).__init__(env_spec)

        dist_info_sym = self.dist_info_sym(input_tensor, dict(), is_training=False)
        mean_var = dist_info_sym["mean"]
        log_std_var = dist_info_sym["log_std"]

        self._f_dist = tensor_utils.compile_function(
            inputs=[input_tensor],
            outputs=[mean_var, log_std_var],
        )
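# Hypothetical usage sketch (f_dist stands in for the compiled self._f_dist above,
# which maps a batch of flat observations to [means, log_stds]); an rllab-style
# get_action would sample roughly as follows.
import numpy as np

def sample_action(f_dist, flat_obs):
    mean, log_std = [np.asarray(out)[0] for out in f_dist([flat_obs])]
    return mean + np.exp(log_std) * np.random.normal(size=mean.shape)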
    def __init__(
        self,
        name,
        env_spec,
        hidden_sizes=(32, 32),
        learn_std=True,
        init_std=1.0,
        adaptive_std=False,
        std_share_network=False,
        std_hidden_sizes=(32, 32),
        min_std=1e-6,
        max_std=1000.0,
        std_modifier=1.0,
        std_hidden_nonlinearity=tf.nn.tanh,
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=tf.identity,
        mean_network=None,
        std_network=None,
        std_parametrization='exp',
        grad_step_size=1.0,
        stop_grad=False,
        extra_input_dim=0,
        # metalearn_baseline=False,
        input_img_shape=(32, 64, 3),
        conv_hidden_sizes=(),
        conv_filters=[16, 16, 16, 16],
        conv_filter_sizes=[5, 5, 5, 5],
        conv_strides=[2, 2, 2, 1],
        conv_pads=['SAME', 'SAME', 'SAME', 'SAME'],
        conv_output_dim=32,
    ):
        """
        :param env_spec: specification of the environment (observation and action spaces)
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: whether the std is trainable
        :param init_std: initial std
        :param adaptive_std: whether to learn a state-dependent std via a separate MLP
        :param std_share_network: whether the std network shares layers with the mean network
        :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
        :param min_std: minimum value the std is clipped to, to avoid numerical issues
        :param max_std: maximum value the std is clipped to, to avoid numerical issues
        :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std is stored, and an exponential transformation is applied
            - softplus: the std is computed as log(1 + exp(x))
        :param grad_step_size: the step size taken in the learner's gradient update; sampled uniformly if given as a range, e.g. [0.1, 1]
        :param stop_grad: whether or not to stop the gradient through the inner gradient update
        :return:
        """
        Serializable.quick_init(self, locals())
        #assert isinstance(env_spec.action_space, Box)
        assert len(input_img_shape) > 0, "input_img_shape must be non-empty"
        obs_dim = env_spec.observation_space.flat_dim - np.prod(
            input_img_shape)
        self.action_dim = env_spec.action_space.flat_dim
        self.n_hidden = len(hidden_sizes)
        self.hidden_nonlinearity = hidden_nonlinearity
        self.output_nonlinearity = output_nonlinearity
        self.input_img_shape = input_img_shape
        self.input_shape = (
            None,
            obs_dim + extra_input_dim + conv_output_dim,
        )
        self.input_total_shape = (
            None,
            np.prod(self.input_img_shape) + obs_dim + extra_input_dim,
        )
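        # With the defaults above (input_img_shape=(32, 64, 3), conv_output_dim=32,
        # extra_input_dim=0): np.prod(input_img_shape) = 6144 image entries are split
        # off the flat observation, input_shape is the post-CNN MLP input
        # (None, obs_dim + 32), and input_total_shape is the raw input
        # (None, 6144 + obs_dim).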
        # self.input_shape = self.input_total_shape
        print("debug432", self.input_img_shape, self.input_shape,
              self.input_total_shape)
        self.step_size = grad_step_size
        self.stop_grad = stop_grad
        # self.metalearn_baseline = metalearn_baseline
        if type(self.step_size) == list:
            raise NotImplementedError("list-valued grad_step_size is not supported since it didn't work well")
        self.cnn = None
        # create network
        if mean_network is None:
            self.all_params = self.create_MLP_params(  # TODO: this should not be a method of the policy! --> helper
                name="mean_network",
                output_dim=self.action_dim,
                hidden_sizes=hidden_sizes,
            )
            self.input_tensor, _ = self.forward_CNN_MLP(
                name='mean_network',
                all_params=self.all_params,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                conv_output_dim=conv_output_dim,
                conv_hidden_sizes=conv_hidden_sizes,
                input_tensor=None,
                reuse=None  # Need to run this for batch norm
            )
            forward_mean = lambda x, params, is_train: self.forward_CNN_MLP(
                name='mean_network',
                all_params=params,
                conv_filters=conv_filters,
                conv_filter_sizes=conv_filter_sizes,
                conv_strides=conv_strides,
                conv_pads=conv_pads,
                conv_output_dim=conv_output_dim,
                conv_hidden_sizes=conv_hidden_sizes,
                input_tensor=x,
                is_training=is_train)[1]
        else:
            raise NotImplementedError('Not supported.')

        if std_network is not None:
            raise NotImplementedError('Not supported.')
        else:
            if adaptive_std:
                raise NotImplementedError('Not supported.')
            else:
                if std_parametrization == 'exp':
                    init_std_param = np.log(init_std)
                elif std_parametrization == 'softplus':
                    init_std_param = np.log(np.exp(init_std) - 1)
                else:
                    raise NotImplementedError
                self.all_params['std_param'] = make_param_layer(
                    num_units=self.action_dim,
                    param=tf.constant_initializer(init_std_param),
                    name="output_std_param",
                    trainable=learn_std,
                )
                forward_std = lambda x, params: forward_param_layer(
                    x, params['std_param'])
            self.all_param_vals = None

            # unify forward mean and forward std into a single function
            self._forward = lambda obs, params, is_train: (forward_mean(
                obs, params, is_train), forward_std(obs, params))

            self.std_parametrization = std_parametrization

            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
                max_std_param = np.log(max_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
                max_std_param = np.log(np.exp(max_std) - 1)
            else:
                raise NotImplementedError

            self.min_std_param = min_std_param  # TODO: change these to min_std_param_raw
            self.max_std_param = max_std_param
            self.std_modifier = np.float64(std_modifier)
            #print("initializing max_std debug4", self.min_std_param, self.max_std_param)

            self._dist = DiagonalGaussian(self.action_dim)

            self._cached_params = {}

            super(MAMLGaussianConvMLPPolicy, self).__init__(env_spec)

            dist_info_sym = self.dist_info_sym(self.input_tensor,
                                               dict(),
                                               is_training=False)
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]

            # pre-update policy
            self._init_f_dist = tensor_utils.compile_function(
                inputs=[self.input_tensor],
                outputs=[mean_var, log_std_var],
            )
            self._cur_f_dist = self._init_f_dist
            self._cur_f_dist_cnn = tensor_utils.compile_function(
                inputs=[self.input_tensor],
                outputs=L.get_output(self.cnn._l_out))
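# Worked shape example (an assumption about how the conv defaults above compose): with
# 'SAME' padding the spatial size after each conv layer is ceil(size / stride), so a
# (32, 64, 3) input shrinks through strides [2, 2, 2, 1] as printed below; the final
# feature map is presumably flattened and projected down to conv_output_dim=32.
import math

h, w = 32, 64
for stride, n_filters in zip([2, 2, 2, 1], [16, 16, 16, 16]):
    h, w = math.ceil(h / stride), math.ceil(w / stride)
    print((h, w, n_filters))  # (16, 32, 16), (8, 16, 16), (4, 8, 16), (4, 8, 16)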
    def __init__(
            self,
            name,
            env_spec,
            hidden_sizes=(32, 32),
            learn_std=True,
            init_std=1.0,
            adaptive_std=False,
            std_share_network=False,
            std_hidden_sizes=(32, 32),
            min_std=1e-6,
            std_hidden_nonlinearity=tf.nn.tanh,
            hidden_nonlinearity=tf.nn.tanh,
            output_nonlinearity=tf.identity,
            mean_network=None,
            std_network=None,
            std_parametrization='exp',
            grad_step_size=1.0,
            stop_grad=False,
    ):
        """
        :param env_spec: specification of the environment (observation and action spaces)
        :param hidden_sizes: list of sizes for the fully-connected hidden layers
        :param learn_std: whether the std is trainable
        :param init_std: initial std
        :param adaptive_std: whether to learn a state-dependent std via a separate MLP
        :param std_share_network: whether the std network shares layers with the mean network
        :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
        :param min_std: minimum value the std is clipped to, to avoid numerical issues
        :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
        :param hidden_nonlinearity: nonlinearity used for each hidden layer
        :param output_nonlinearity: nonlinearity for the output layer
        :param mean_network: custom network for the output mean
        :param std_network: custom network for the output log std
        :param std_parametrization: how the std should be parametrized. There are a few options:
            - exp: the logarithm of the std is stored, and an exponential transformation is applied
            - softplus: the std is computed as log(1 + exp(x))
        :param grad_step_size: the step size taken in the learner's gradient update; sampled uniformly if given as a range, e.g. [0.1, 1]
        :param stop_grad: whether or not to stop the gradient through the inner gradient update
        :return:
        """
        Serializable.quick_init(self, locals())
        assert isinstance(env_spec.action_space, Box)

        obs_dim = env_spec.observation_space.flat_dim
        self.action_dim = env_spec.action_space.flat_dim
        self.n_hidden = len(hidden_sizes)
        self.hidden_nonlinearity = hidden_nonlinearity
        self.output_nonlinearity = output_nonlinearity
        self.input_shape = (None, obs_dim,)
        self.step_size = grad_step_size
        self.stop_grad = stop_grad
        if type(self.step_size) == list:
            raise NotImplementedError("list-valued grad_step_size is not supported since it didn't work well")

        # create network
        if mean_network is None:
            self.all_params = self.create_MLP(  # TODO: this should not be a method of the policy! --> helper
                name="mean_network",
                output_dim=self.action_dim,
                hidden_sizes=hidden_sizes,
            )
            self.input_tensor, _ = self.forward_MLP('mean_network', self.all_params,
                reuse=None  # Need to run this for batch norm
            )
            forward_mean = lambda x, params, is_train: self.forward_MLP('mean_network', params,
                input_tensor=x, is_training=is_train)[1]
        else:
            raise NotImplementedError('Not supported.')

        if std_network is not None:
            raise NotImplementedError('Not supported.')
        else:
            if adaptive_std:
                raise NotImplementedError('Not supported.')
            else:
                if std_parametrization == 'exp':
                    init_std_param = np.log(init_std)
                elif std_parametrization == 'softplus':
                    init_std_param = np.log(np.exp(init_std) - 1)
                else:
                    raise NotImplementedError
                self.all_params['std_param'] = make_param_layer(
                    num_units=self.action_dim,
                    param=tf.constant_initializer(init_std_param),
                    name="output_std_param",
                    trainable=learn_std,
                )
                forward_std = lambda x, params: forward_param_layer(x, params['std_param'])
            self.all_param_vals = None

            # unify forward mean and forward std into a single function
            self._forward = lambda obs, params, is_train: (
                    forward_mean(obs, params, is_train), forward_std(obs, params))

            self.std_parametrization = std_parametrization

            if std_parametrization == 'exp':
                min_std_param = np.log(min_std)
            elif std_parametrization == 'softplus':
                min_std_param = np.log(np.exp(min_std) - 1)
            else:
                raise NotImplementedError

            self.min_std_param = min_std_param

            self._dist = DiagonalGaussian(self.action_dim)

            self._cached_params = {}

            super(MAMLGaussianMLPPolicy, self).__init__(env_spec)

            dist_info_sym = self.dist_info_sym(self.input_tensor, dict(), is_training=False)
            mean_var = dist_info_sym["mean"]
            log_std_var = dist_info_sym["log_std"]

            # pre-update policy
            self._init_f_dist = tensor_utils.compile_function(
                inputs=[self.input_tensor],
                outputs=[mean_var, log_std_var],
            )
            self._cur_f_dist = self._init_f_dist
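# Conceptual sketch (plain NumPy, not this class's API) of the inner adaptation step
# that grad_step_size and stop_grad refer to: adapted parameters are one gradient step
# away from self.all_params, and with stop_grad=True the inner gradient is presumably
# treated as a constant (first-order approximation) when differentiating through it.
import numpy as np

def adapt_params(params, grads, step_size=1.0):
    # params, grads: dicts mapping parameter names to arrays (like self.all_params)
    return {name: params[name] - step_size * grads[name] for name in params}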
    def __init__(
        self,
        env_spec,
        subsample_factor=1.,
        num_seq_inputs=1,
        learning_rate=0.01,
        algo_discount=0.99,
        repeat=30,
        repeat_sym=30,
        momentum=0.5,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=tf.identity,
        init_meta_constant=0.0,
        normalize_inputs=True,
        normalize_outputs=True,
        extra_input_dim=0,
    ):
        Serializable.quick_init(self, locals())

        self.env_spec = env_spec
        obs_dim = env_spec.observation_space.flat_dim
        self.action_dim = env_spec.action_space.flat_dim
        self.n_hidden = len(hidden_sizes)
        self.hidden_nonlinearity = hidden_nonlinearity
        self.output_nonlinearity = output_nonlinearity
        self.input_shape = (
            None,
            2 * (obs_dim + extra_input_dim) + 3,
        )
        self.input_to_discard = extra_input_dim  # zero out the last extra_input_dim elements of the obs vector
        self.obs_mask = np.array([1.0] * obs_dim + [0.] * extra_input_dim)
        self.learning_rate = learning_rate
        self.algo_discount = algo_discount
        self.max_path_length = 100
        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs

        #
        # self._enh_obs_mean_var = tf.Variable(
        #     tf.zeros((1,) + self.input_shape, dtype=tf.float32),
        #     name="enh_obs_mean",
        #     trainable=False
        # )
        # self._enh_obs_std_var = tf.Variable(
        #     tf.ones((1,) + self.input_shape, dtype=tf.float32),
        #     name="enh_obs_std",
        #     trainable=False
        # )
        self.output_dim = 1
        self._ret_mean_var = tf.Variable(tf.zeros((self.output_dim),
                                                  dtype=tf.float32),
                                         name="ret_mean",
                                         trainable=False)
        self._ret_std_var = tf.Variable(tf.ones((self.output_dim),
                                                dtype=tf.float32),
                                        name="ret_std",
                                        trainable=False)

        self.all_params = self.create_MLP(
            name="mean_baseline_network",
            output_dim=1,
            hidden_sizes=hidden_sizes,
        )
        self.input_tensor, _ = self.forward_MLP('mean_baseline_network',
                                                self.all_params,
                                                reuse=None)
        print("debug, input_tensor", self.input_tensor)
        self.normalized_input_tensor = normalize_sym(self.input_tensor)

        self.all_params['meta_constant'] = make_param_layer(
            num_units=1,
            param=tf.constant_initializer(init_meta_constant),
            name="output_bas_meta_constant",
            trainable=True,
        )
        forward_mean = lambda x, params, is_train: self.forward_MLP(
            'mean_baseline_network',
            all_params=params,
            input_tensor=x,
            is_training=is_train)[1]
        forward_meta_constant = lambda x, params: forward_param_layer(
            x, params['meta_constant'])
        self._forward = lambda normalized_enh_obs, params, is_train: (
            forward_mean(normalized_enh_obs, params, is_train),
            forward_meta_constant(normalized_enh_obs, params))
        self.all_param_vals = None

        # sess = tf.get_default_session()
        # if sess is None:
        #     sess = tf.Session()
        # sess.run(tf.global_variables_initializer())

        self.learning_rate_per_param = OrderedDict(
            zip(self.all_params.keys(), [
                tf.Variable(self.learning_rate *
                            tf.ones(tf.shape(self.all_params[key])),
                            trainable=False) for key in self.all_params.keys()
            ]))
        # sess.run(tf.global_variables_initializer())
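        # Non-trainable per-parameter state: the learning rates above and the
        # accumulators below are variables shaped like the corresponding entries of
        # self.all_params, presumably consumed by the baseline's update logic defined
        # elsewhere in the class.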
        self.accumulation = OrderedDict(
            zip(self.all_params.keys(), [
                tf.Variable(tf.zeros(tf.shape(self.all_params[key])),
                            trainable=False) for key in self.all_params.keys()
            ]))
        # self.last_grad = OrderedDict(zip(self.all_params.keys(),[tf.Variable(tf.zeros_like(self.all_params[key]), trainable=False) for key in self.all_params.keys()]))

        # self._dist = DiagonalGaussian(1)
        self._cached_params = {}
        super(MAMLGaussianMLPBaseline, self).__init__(env_spec)

        normalized_predict_sym = self.normalized_predict_sym(
            normalized_enh_obs_vars=self.normalized_input_tensor)
        mean_var = normalized_predict_sym[
            'mean'] * self._ret_std_var + self._ret_mean_var
        meta_constant_var = normalized_predict_sym['meta_constant']

        self._init_f_dist = tensor_utils.compile_function(
            inputs=[self.input_tensor],
            outputs=[mean_var, meta_constant_var],
        )
        self._cur_f_dist = self._init_f_dist
        self.initialized = 30
        self.lr_mult = 1.0
        self.repeat = repeat
        self.repeat_sym = repeat_sym
        self.momentum = momentum
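# Standalone NumPy sketch (not this class's API) of the return normalization implied by
# _ret_mean_var / _ret_std_var above: targets are normalized before fitting, and the
# network's normalized prediction is mapped back as prediction * ret_std + ret_mean,
# matching the mean_var expression in the constructor.
import numpy as np

returns = np.array([10.0, 20.0, 30.0])
ret_mean, ret_std = returns.mean(), returns.std() + 1e-8
normalized_targets = (returns - ret_mean) / ret_std   # what the MLP would be fit to
normalized_pred = normalized_targets                  # pretend the fit is perfect
predictions = normalized_pred * ret_std + ret_mean    # de-normalized predictions
assert np.allclose(predictions, returns)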