def __init__(
        self,
        name,
        env_spec,
        hidden_sizes=(32, 32),
        learn_std=True,
        init_std=1.0,
        adaptive_std=False,
        std_share_network=False,
        std_hidden_sizes=(32, 32),
        min_std=1e-6,
        max_std=1000.0,
        std_modifier=1.0,
        std_hidden_nonlinearity=tf.nn.tanh,
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=tf.identity,
        mean_network=None,
        std_network=None,
        std_parametrization='exp',
        grad_step_size=1.0,
        stop_grad=False,
        extra_input_dim=0,
        # metalearn_baseline=False,
):
    """
    :param env_spec: environment specification (observation and action spaces)
    :param hidden_sizes: list of sizes for the fully-connected hidden layers
    :param learn_std: whether the std is trainable
    :param init_std: initial std
    :param adaptive_std: whether to make the std a function of the observations
    :param std_share_network: whether the std network shares weights with the mean network
    :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
    :param min_std: minimum value for the std, to avoid numerical issues
    :param max_std: maximum value for the std
    :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :param output_nonlinearity: nonlinearity for the output layer
    :param mean_network: custom network for the output mean
    :param std_network: custom network for the output log std
    :param std_parametrization: how the std should be parametrized. There are a few options:
        - exp: the logarithm of the std is stored, and an exponential transformation is applied
        - softplus: the std is computed as log(1 + exp(x))
    :param grad_step_size: the step size taken in the learner's gradient update; passing a range
        (e.g. [0.1, 1]) to sample from uniformly is no longer supported
    :param stop_grad: whether to stop the gradient flowing through the inner gradient update
    :return:
    """
    Serializable.quick_init(self, locals())
    # assert isinstance(env_spec.action_space, Box)
    obs_dim = env_spec.observation_space.flat_dim
    self.action_dim = env_spec.action_space.flat_dim
    self.n_hidden = len(hidden_sizes)
    self.hidden_nonlinearity = hidden_nonlinearity
    self.output_nonlinearity = output_nonlinearity
    self.input_shape = (None, obs_dim + extra_input_dim,)
    self.extra_input_dim = extra_input_dim
    self.step_size = grad_step_size
    self.stop_grad = stop_grad
    # self.metalearn_baseline = metalearn_baseline
    if isinstance(self.step_size, list):
        raise NotImplementedError("removing this since it didn't work well")

    # create network
    if mean_network is None:
        self.all_params = self.create_MLP(  # TODO: this should not be a method of the policy! --> helper
            name="mean_network",
            output_dim=self.action_dim,
            hidden_sizes=hidden_sizes,
        )
        self.input_tensor, _ = self.forward_MLP(
            'mean_network', self.all_params,
            reuse=None  # Need to run this for batch norm
        )
        forward_mean = lambda x, params, is_train: self.forward_MLP(
            'mean_network', all_params=params, input_tensor=x, is_training=is_train)[1]
    else:
        raise NotImplementedError('Not supported.')

    if std_network is not None:
        raise NotImplementedError('Not supported.')
    else:
        if adaptive_std:
            raise NotImplementedError('Not supported.')
        else:
            if std_parametrization == 'exp':
                init_std_param = np.log(init_std)
            elif std_parametrization == 'softplus':
                init_std_param = np.log(np.exp(init_std) - 1)
            else:
                raise NotImplementedError
            self.all_params['std_param'] = make_param_layer(
                num_units=self.action_dim,
                param=tf.constant_initializer(init_std_param),
                name="output_std_param",
                trainable=learn_std,
            )
            forward_std = lambda x, params: forward_param_layer(x, params['std_param'])
        self.all_param_vals = None

        # unify forward mean and forward std into a single function
        self._forward = lambda obs, params, is_train: (
            forward_mean(obs, params, is_train), forward_std(obs, params))

        self.std_parametrization = std_parametrization

        if std_parametrization == 'exp':
            min_std_param = np.log(min_std)
            max_std_param = np.log(max_std)
        elif std_parametrization == 'softplus':
            min_std_param = np.log(np.exp(min_std) - 1)
            max_std_param = np.log(np.exp(max_std) - 1)
        else:
            raise NotImplementedError

        self.min_std_param = min_std_param  # TODO: change these to min_std_param_raw
        self.max_std_param = max_std_param
        self.std_modifier = np.float64(std_modifier)
        # print("initializing max_std debug4", self.min_std_param, self.max_std_param)

        self._dist = DiagonalGaussian(self.action_dim)

        self._cached_params = {}

        super(MAMLGaussianMLPPolicy, self).__init__(env_spec)

        dist_info_sym = self.dist_info_sym(self.input_tensor, dict(), is_training=False)
        mean_var = dist_info_sym["mean"]
        log_std_var = dist_info_sym["log_std"]

        # pre-update policy
        self._init_f_dist = tensor_utils.compile_function(
            inputs=[self.input_tensor],
            outputs=[mean_var, log_std_var],
        )
        self._cur_f_dist = self._init_f_dist
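For reference, a minimal numpy sketch of the two std parametrizations handled above: under 'exp' the raw parameter stores log(std), under 'softplus' it stores the pre-softplus value, and the initializers computed in the constructor are exactly the inverses of those maps. The helper names below are illustrative, not part of the class.

import numpy as np

def std_to_param(std, parametrization='exp'):
    # inverse map, as used to initialize the raw std parameter above
    if parametrization == 'exp':
        return np.log(std)
    elif parametrization == 'softplus':
        return np.log(np.exp(std) - 1)
    raise NotImplementedError

def param_to_std(param, parametrization='exp'):
    # forward map applied to the raw parameter to recover the std
    if parametrization == 'exp':
        return np.exp(param)
    elif parametrization == 'softplus':
        return np.log(1 + np.exp(param))
    raise NotImplementedError

# round trip recovers init_std for both parametrizations
for p in ('exp', 'softplus'):
    assert np.isclose(param_to_std(std_to_param(1.0, p), p), 1.0)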
def __init__(
        self,
        env_spec,
        subsample_factor=1.,
        num_seq_inputs=1,
        learning_rate=0.01,
        algo_discount=0.99,
        repeat=30,
        repeat_sym=30,
        momentum=0.5,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.relu,
        output_nonlinearity=tf.identity,
        init_meta_constant=0.0,
        normalize_inputs=True,
        normalize_outputs=True,
        extra_input_dim=0,
):
    Serializable.quick_init(self, locals())

    self.env_spec = env_spec
    obs_dim = env_spec.observation_space.flat_dim
    self.action_dim = env_spec.action_space.flat_dim
    self.n_hidden = len(hidden_sizes)
    self.hidden_nonlinearity = hidden_nonlinearity
    self.output_nonlinearity = output_nonlinearity
    self.input_shape = (None, 2 * (obs_dim + extra_input_dim) + 3,)
    self.input_to_discard = extra_input_dim
    # multiply the last extra_input_dim elements of the obs vector by 0
    self.obs_mask = np.array([1.0] * obs_dim + [0.] * extra_input_dim)
    self.learning_rate = learning_rate
    self.algo_discount = algo_discount
    self.max_path_length = 100
    self._normalize_inputs = normalize_inputs
    self._normalize_outputs = normalize_outputs
    # self._enh_obs_mean_var = tf.Variable(
    #     tf.zeros((1,) + self.input_shape, dtype=tf.float32),
    #     name="enh_obs_mean",
    #     trainable=False
    # )
    # self._enh_obs_std_var = tf.Variable(
    #     tf.ones((1,) + self.input_shape, dtype=tf.float32),
    #     name="enh_obs_std",
    #     trainable=False
    # )
    self.output_dim = 1
    self._ret_mean_var = tf.Variable(
        tf.zeros((self.output_dim), dtype=tf.float32),
        name="ret_mean",
        trainable=False
    )
    self._ret_std_var = tf.Variable(
        tf.ones((self.output_dim), dtype=tf.float32),
        name="ret_std",
        trainable=False
    )
    self.all_params = self.create_MLP(
        name="mean_baseline_network",
        output_dim=1,
        hidden_sizes=hidden_sizes,
    )
    self.input_tensor, _ = self.forward_MLP('mean_baseline_network', self.all_params, reuse=None)
    print("debug, input_tensor", self.input_tensor)
    self.normalized_input_tensor = normalize_sym(self.input_tensor)
    self.all_params['meta_constant'] = make_param_layer(
        num_units=1,
        param=tf.constant_initializer(init_meta_constant),
        name="output_bas_meta_constant",
        trainable=True,
    )
    forward_mean = lambda x, params, is_train: self.forward_MLP(
        'mean_baseline_network', all_params=params, input_tensor=x, is_training=is_train)[1]
    forward_meta_constant = lambda x, params: forward_param_layer(x, params['meta_constant'])
    self._forward = lambda normalized_enh_obs, params, is_train: (
        forward_mean(normalized_enh_obs, params, is_train),
        forward_meta_constant(normalized_enh_obs, params))

    self.all_param_vals = None

    # sess = tf.get_default_session()
    # if sess is None:
    #     sess = tf.Session()
    # sess.run(tf.global_variables_initializer())
    self.learning_rate_per_param = OrderedDict(
        zip(self.all_params.keys(),
            [tf.Variable(self.learning_rate * tf.ones(tf.shape(self.all_params[key])), trainable=False)
             for key in self.all_params.keys()]))
    # sess.run(tf.global_variables_initializer())
    self.accumulation = OrderedDict(
        zip(self.all_params.keys(),
            [tf.Variable(tf.zeros(tf.shape(self.all_params[key])), trainable=False)
             for key in self.all_params.keys()]))
    # self.last_grad = OrderedDict(
    #     zip(self.all_params.keys(),
    #         [tf.Variable(tf.zeros_like(self.all_params[key]), trainable=False)
    #          for key in self.all_params.keys()]))
    # self._dist = DiagonalGaussian(1)
    self._cached_params = {}

    super(MAMLGaussianMLPBaseline, self).__init__(env_spec)

    normalized_predict_sym = self.normalized_predict_sym(
        normalized_enh_obs_vars=self.normalized_input_tensor)
    mean_var = normalized_predict_sym['mean'] * self._ret_std_var + self._ret_mean_var
    meta_constant_var = normalized_predict_sym['meta_constant']

    self._init_f_dist = tensor_utils.compile_function(
        inputs=[self.input_tensor],
        outputs=[mean_var, meta_constant_var],
    )
    self._cur_f_dist = self._init_f_dist
    self.initialized = 30
    self.lr_mult = 1.0
    self.repeat = repeat
    self.repeat_sym = repeat_sym
    self.momentum = momentum
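A small numpy sketch of the output denormalization performed above: the baseline network predicts in normalized return space, and `_ret_mean_var` / `_ret_std_var` map the prediction back to raw returns. The helper names are illustrative, and the inverse (normalizing targets before fitting) is an assumption about how the normalization statistics are used.

import numpy as np

def denormalize_return(normalized_pred, ret_mean, ret_std):
    # mirrors: mean_var = normalized_predict_sym['mean'] * self._ret_std_var + self._ret_mean_var
    return normalized_pred * ret_std + ret_mean

def normalize_return(ret, ret_mean, ret_std):
    # assumed inverse map for constructing normalized regression targets
    return (ret - ret_mean) / ret_std

ret_mean, ret_std = 5.0, 2.0
assert np.isclose(
    denormalize_return(normalize_return(9.0, ret_mean, ret_std), ret_mean, ret_std), 9.0)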
def __init__(self,
             name,
             env_spec,
             hidden_sizes=(32, 32),
             learn_std=True,
             init_std=1.0,
             adaptive_std=False,
             std_share_network=False,
             std_hidden_sizes=(32, 32),
             min_std=1e-6,
             std_hidden_nonlinearity=tf.nn.tanh,
             hidden_nonlinearity=tf.nn.tanh,
             output_nonlinearity=tf.identity,
             mean_network=None,
             std_network=None,
             std_parametrization='exp'):
    """
    :param env_spec: environment specification (observation and action spaces)
    :param hidden_sizes: list of sizes for the fully-connected hidden layers
    :param learn_std: whether the std is trainable
    :param init_std: initial std
    :param adaptive_std: whether to make the std a function of the observations
    :param std_share_network: whether the std network shares weights with the mean network
    :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
    :param min_std: minimum value for the std, to avoid numerical issues
    :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :param output_nonlinearity: nonlinearity for the output layer
    :param mean_network: custom network for the output mean
    :param std_network: custom network for the output log std
    :param std_parametrization: how the std should be parametrized. There are a few options:
        - exp: the logarithm of the std is stored, and an exponential transformation is applied
        - softplus: the std is computed as log(1 + exp(x))
    :return:
    """
    Serializable.quick_init(self, locals())
    assert isinstance(env_spec.action_space, Box)

    obs_dim = env_spec.observation_space.flat_dim
    action_dim = env_spec.action_space.flat_dim
    self.all_param_vals = False

    print('obs_dim ', obs_dim, flush=True)

    # create network
    if mean_network is None:
        self.mean_params = mean_params = self.create_MLP(
            name="mean_network",
            input_shape=(None, obs_dim,),
            output_dim=action_dim,
            hidden_sizes=hidden_sizes,
        )
        input_tensor, mean_tensor = self.forward_MLP(
            'mean_network',
            mean_params,
            n_hidden=len(hidden_sizes),
            input_shape=(obs_dim,),
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
            reuse=None  # Needed for batch norm
        )
        # if you want to input your own thing.
        self._forward_mean = lambda x, is_train: self.forward_MLP(
            'mean_network',
            mean_params,
            n_hidden=len(hidden_sizes),
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
            input_tensor=x,
            is_training=is_train)[1]
    else:
        raise NotImplementedError('Chelsea does not support this.')

    if std_network is not None:
        raise NotImplementedError('Minimal Gaussian MLP does not support this.')
    else:
        if adaptive_std:
            # NOTE - this branch isn't tested
            raise NotImplementedError('Minimal Gaussian MLP doesnt have a tested version of this.')
            self.std_params = std_params = self.create_MLP(
                name="std_network",
                input_shape=(None, obs_dim,),
                output_dim=action_dim,
                hidden_sizes=std_hidden_sizes,
            )
            # if you want to input your own thing.
            self._forward_std = lambda x: self.forward_MLP(
                'std_network',
                std_params,
                n_hidden=len(hidden_sizes),
                hidden_nonlinearity=std_hidden_nonlinearity,
                output_nonlinearity=tf.identity,
                input_tensor=x)[1]
        else:
            if std_parametrization == 'exp':
                init_std_param = np.log(init_std)
            elif std_parametrization == 'softplus':
                init_std_param = np.log(np.exp(init_std) - 1)
            else:
                raise NotImplementedError
            self.std_params = make_param_layer(
                num_units=action_dim,
                param=tf.constant_initializer(init_std_param),
                name="output_std_param",
                trainable=learn_std,
            )
            self._forward_std = lambda x: forward_param_layer(x, self.std_params)

        self.std_parametrization = std_parametrization

        if std_parametrization == 'exp':
            min_std_param = np.log(min_std)
        elif std_parametrization == 'softplus':
            min_std_param = np.log(np.exp(min_std) - 1)
        else:
            raise NotImplementedError

        self.min_std_param = min_std_param

        self._dist = DiagonalGaussian(action_dim)

        self._cached_params = {}

        super(GaussianMLPPolicy, self).__init__(env_spec)

        dist_info_sym = self.dist_info_sym(input_tensor, dict(), is_training=False)
        mean_var = dist_info_sym["mean"]
        log_std_var = dist_info_sym["log_std"]

        self._f_dist = tensor_utils.compile_function(
            inputs=[input_tensor],
            outputs=[mean_var, log_std_var],
        )
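As a hedged usage sketch (not code from this file), the compiled `_f_dist` maps a batch of flattened observations to the action mean and log std; drawing an action from the resulting diagonal Gaussian then looks like the following. `policy` and `flat_obs` are placeholders for an instantiated policy and a flattened observation, and the batch-indexing convention is assumed from the compiled-function interface above.

import numpy as np

def sample_action(policy, flat_obs):
    # _f_dist takes a batch of observations and returns [means, log_stds]
    mean, log_std = [x[0] for x in policy._f_dist([flat_obs])]
    # reparameterized draw from N(mean, diag(exp(log_std))^2)
    rnd = np.random.normal(size=mean.shape)
    action = mean + np.exp(log_std) * rnd
    return action, dict(mean=mean, log_std=log_std)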
def __init__(
        self,
        name,
        input_shape,
        output_dim,
        mean_network=None,
        hidden_sizes=(32, 32),
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=lambda x: x * 0.0 + tf.Variable(initial_value=-1.0, dtype=tf.float32),
        # output_nonlinearity=tf.identity,
        optimizer=None,
        use_trust_region=True,
        step_size=0.01,
        learn_std=True,
        init_std=1.0,
        adaptive_std=False,
        std_share_network=False,
        std_hidden_sizes=(32, 32),
        std_nonlinearity=None,
        normalize_inputs=True,
        normalize_outputs=True,
        subsample_factor=1.0):
    """
    :param input_shape: Shape of the input data.
    :param output_dim: Dimension of output.
    :param hidden_sizes: Number of hidden units of each layer of the mean network.
    :param hidden_nonlinearity: Non-linearity used for each layer of the mean network.
    :param optimizer: Optimizer for minimizing the negative log-likelihood.
    :param use_trust_region: Whether to use a trust region constraint.
    :param step_size: KL divergence constraint for each iteration.
    :param learn_std: Whether to learn the standard deviations. Only effective if adaptive_std is
        False. If adaptive_std is True, this parameter is ignored, and the weights for the std
        network are always learned.
    :param adaptive_std: Whether to make the std a function of the states.
    :param std_share_network: Whether to use the same network as the mean.
    :param std_hidden_sizes: Number of hidden units of each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same architecture as the mean.
    :param std_nonlinearity: Non-linearity used for each layer of the std network. Only used if
        `std_share_network` is False. It defaults to the same non-linearity as the mean.
    """
    Serializable.quick_init(self, locals())

    with tf.variable_scope(name):
        if optimizer is None:
            if use_trust_region:
                optimizer = PenaltyLbfgsOptimizer("optimizer")
            else:
                optimizer = LbfgsOptimizer("optimizer")

        self._optimizer = optimizer
        self._subsample_factor = subsample_factor

        if mean_network is None:
            mean_network = create_MLP(
                name="mean_network",
                output_dim=1,
                hidden_sizes=hidden_sizes,
                hidden_nonlinearity=hidden_nonlinearity,
                output_nonlinearity=output_nonlinearity,
            )
            forward_mean = lambda x, params, is_train: self.forward_MLP(
                'mean_network', all_params=params, input_tensor=x, is_training=is_train)[1]
        else:
            raise NotImplementedError('Not supported.')

        # print("Debug2, mean network is defined here")
        # mean_network = L.ParamLayer(
        #     incoming=L.InputLayer(
        #         shape=(None,) + input_shape,
        #         name="input_layer"),
        #     num_units=1,
        #     param=tf.constant_initializer(-200.0),
        #     name="mean_network",
        #     trainable=True,
        # ),
        # print(mean_network.input_layer)
        # print("debug4", isinstance(L.InputLayer(
        #     shape=(None,) + input_shape,
        #     name="input_layer"), tuple))
        #
        # l_mean = mean_network
        # mean_network = MLP(
        #     name="mean_network",
        #     input_shape=input_shape,
        #     output_dim=output_dim,
        #     hidden_sizes=hidden_sizes,
        #     hidden_nonlinearity=hidden_nonlinearity,
        #     output_nonlinearity=output_nonlinearity,
        # )
        #
        # l_mean = mean_network.output_layer

        if adaptive_std:
            # l_log_std = MLP(
            #     name="log_std_network",
            #     input_shape=input_shape,
            #     input_var=mean_network.input_layer.input_var,
            #     output_dim=output_dim,
            #     hidden_sizes=std_hidden_sizes,
            #     hidden_nonlinearity=std_nonlinearity,
            #     output_nonlinearity=None,
            # ).output_layer
            raise NotImplementedError('Not supported.')
        else:
            # l_log_std = L.ParamLayer(
            #     mean_network.input_layer,
            #     num_units=output_dim,
            #     param=tf.constant_initializer(np.log(init_std)),
            #     name="output_log_std",
            #     trainable=learn_std,
            # )
            self.all_params['std_param'] = make_param_layer(
                num_units=1,
                param=tf.constant_initializer(init_std),
                name="output_std_param",
                trainable=learn_std,
            )
            forward_std = lambda x, params: forward_param_layer(x, params['std_param'])
        self.all_param_vals = None

        # NOTE: self.all_params, l_mean and l_log_std are only defined by the
        # commented-out layer construction above.
        LayersPowered.__init__(self, [l_mean, l_log_std])

        xs_var = mean_network.input_layer.input_var
        ys_var = tf.placeholder(dtype=tf.float32, name="ys", shape=(None, output_dim))
        old_means_var = tf.placeholder(dtype=tf.float32, name="old_means", shape=(None, output_dim))
        old_log_stds_var = tf.placeholder(dtype=tf.float32, name="old_log_stds", shape=(None, output_dim))

        x_mean_var = tf.Variable(
            np.zeros((1,) + input_shape, dtype=np.float32),
            name="x_mean",
        )
        x_std_var = tf.Variable(
            np.ones((1,) + input_shape, dtype=np.float32),
            name="x_std",
        )
        y_mean_var = tf.Variable(
            np.zeros((1, output_dim), dtype=np.float32),
            name="y_mean",
        )
        y_std_var = tf.Variable(
            np.ones((1, output_dim), dtype=np.float32),
            name="y_std",
        )

        normalized_xs_var = (xs_var - x_mean_var) / x_std_var
        normalized_ys_var = (ys_var - y_mean_var) / y_std_var

        normalized_means_var = L.get_output(
            l_mean, {mean_network.input_layer: normalized_xs_var})
        normalized_log_stds_var = L.get_output(
            l_log_std, {mean_network.input_layer: normalized_xs_var})

        means_var = normalized_means_var * y_std_var + y_mean_var
        log_stds_var = normalized_log_stds_var + tf.log(y_std_var)

        normalized_old_means_var = (old_means_var - y_mean_var) / y_std_var
        normalized_old_log_stds_var = old_log_stds_var - tf.log(y_std_var)

        ## code added for symbolic prediction, used in constructing the meta-learning objective
        def normalized_means_var_sym(xs, params):
            inputs = OrderedDict({mean_network.input_layer: xs})
            inputs.update(params)
            return L.get_output(layer_or_layers=l_mean, inputs=inputs)

        # normalized_means_var_sym = lambda xs, params: L.get_output(layer_or_layers=l_mean, inputs=OrderedDict({mean_network.input_layer: xs}))
        # mean_network.input_layer: (xs - x_mean_var) / x_std_var,
        # normalized_log_stds_var_sym = L.get_output(l_log_std, {mean_network.input_layer: normalized_xs_var})

        means_var_sym = lambda xs, params: normalized_means_var_sym(xs=xs, params=params) * y_std_var + y_mean_var
        # log_stds_var = normalized_log_stds_var + tf.log(y_std_var)

        dist = self._dist = DiagonalGaussian(output_dim)

        normalized_dist_info_vars = dict(mean=normalized_means_var, log_std=normalized_log_stds_var)

        mean_kl = tf.reduce_mean(
            dist.kl_sym(
                dict(mean=normalized_old_means_var, log_std=normalized_old_log_stds_var),
                normalized_dist_info_vars,
            ))

        # loss = - tf.reduce_mean(dist.log_likelihood_sym(normalized_ys_var, normalized_dist_info_vars))
        loss = tf.nn.l2_loss(normalized_ys_var - normalized_means_var) + tf.nn.l2_loss(normalized_log_stds_var)

        self._f_predict = tensor_utils.compile_function([xs_var], means_var)
        self._f_pdists = tensor_utils.compile_function([xs_var], [means_var, log_stds_var])
        self._l_mean = l_mean
        self._l_log_std = l_log_std

        self._f_predict_sym = means_var_sym
        self.loss_sym = loss

        optimizer_args = dict(
            loss=loss,
            target=self,
            network_outputs=[normalized_means_var, normalized_log_stds_var],
        )

        if use_trust_region:
            optimizer_args["leq_constraint"] = (mean_kl, step_size)
            optimizer_args["inputs"] = [xs_var, ys_var, old_means_var, old_log_stds_var]
        else:
            optimizer_args["inputs"] = [xs_var, ys_var]

        self._optimizer.update_opt(**optimizer_args)

        self._use_trust_region = use_trust_region
        self._name = name

        self._normalize_inputs = normalize_inputs
        self._normalize_outputs = normalize_outputs
        self._mean_network = mean_network
        self._x_mean_var = x_mean_var
        self._x_std_var = x_std_var
        self._y_mean_var = y_mean_var
        self._y_std_var = y_std_var
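For reference, the trust-region constraint above bounds the mean KL divergence between the old and new diagonal Gaussians. A numpy restatement of the standard closed form, which is what `dist.kl_sym` is assumed to compute symbolically, is:

import numpy as np

def diag_gaussian_kl(old_mean, old_log_std, new_mean, new_log_std):
    # KL(old || new) for diagonal Gaussians, summed over dimensions:
    # sum( log(s_new / s_old) + (s_old^2 + (mu_old - mu_new)^2) / (2 s_new^2) - 1/2 )
    old_std, new_std = np.exp(old_log_std), np.exp(new_log_std)
    return np.sum(
        new_log_std - old_log_std
        + (np.square(old_std) + np.square(old_mean - new_mean)) / (2.0 * np.square(new_std))
        - 0.5,
        axis=-1,
    )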
def __init__(
        self,
        name,
        env_spec,
        hidden_sizes=(32, 32),
        learn_std=True,
        init_std=1.0,
        adaptive_std=False,
        std_share_network=False,
        std_hidden_sizes=(32, 32),
        min_std=1e-6,
        std_hidden_nonlinearity=tf.nn.tanh,
        hidden_nonlinearity=tf.nn.tanh,
        output_nonlinearity=tf.identity,
        mean_network=None,
        std_network=None,
        std_parametrization='exp'
):
    """
    :param env_spec: environment specification (observation and action spaces)
    :param hidden_sizes: list of sizes for the fully-connected hidden layers
    :param learn_std: whether the std is trainable
    :param init_std: initial std
    :param adaptive_std: whether to make the std a function of the observations
    :param std_share_network: whether the std network shares weights with the mean network
    :param std_hidden_sizes: list of sizes for the fully-connected layers of the std network
    :param min_std: minimum value for the std, to avoid numerical issues
    :param std_hidden_nonlinearity: nonlinearity used for each hidden layer of the std network
    :param hidden_nonlinearity: nonlinearity used for each hidden layer
    :param output_nonlinearity: nonlinearity for the output layer
    :param mean_network: custom network for the output mean
    :param std_network: custom network for the output log std
    :param std_parametrization: how the std should be parametrized. There are a few options:
        - exp: the logarithm of the std is stored, and an exponential transformation is applied
        - softplus: the std is computed as log(1 + exp(x))
    :return:
    """
    Serializable.quick_init(self, locals())
    assert isinstance(env_spec.action_space, Box)

    obs_dim = env_spec.observation_space.flat_dim
    action_dim = env_spec.action_space.flat_dim

    # create network
    if mean_network is None:
        self.mean_params = mean_params = self.create_MLP(
            name="mean_network",
            input_shape=(None, obs_dim,),
            output_dim=action_dim,
            hidden_sizes=hidden_sizes,
        )
        input_tensor, mean_tensor = self.forward_MLP(
            'mean_network',
            mean_params,
            n_hidden=len(hidden_sizes),
            input_shape=(obs_dim,),
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
            reuse=None  # Needed for batch norm
        )
        # if you want to input your own thing.
        self._forward_mean = lambda x, is_train: self.forward_MLP(
            'mean_network',
            mean_params,
            n_hidden=len(hidden_sizes),
            hidden_nonlinearity=hidden_nonlinearity,
            output_nonlinearity=output_nonlinearity,
            input_tensor=x,
            is_training=is_train)[1]
    else:
        raise NotImplementedError('Chelsea does not support this.')

    if std_network is not None:
        raise NotImplementedError('Minimal Gaussian MLP does not support this.')
    else:
        if adaptive_std:
            # NOTE - this branch isn't tested
            raise NotImplementedError('Minimal Gaussian MLP doesnt have a tested version of this.')
            self.std_params = std_params = self.create_MLP(
                name="std_network",
                input_shape=(None, obs_dim,),
                output_dim=action_dim,
                hidden_sizes=std_hidden_sizes,
            )
            # if you want to input your own thing.
            self._forward_std = lambda x: self.forward_MLP(
                'std_network',
                std_params,
                n_hidden=len(hidden_sizes),
                hidden_nonlinearity=std_hidden_nonlinearity,
                output_nonlinearity=tf.identity,
                input_tensor=x)[1]
        else:
            if std_parametrization == 'exp':
                init_std_param = np.log(init_std)
            elif std_parametrization == 'softplus':
                init_std_param = np.log(np.exp(init_std) - 1)
            else:
                raise NotImplementedError
            self.std_params = make_param_layer(
                num_units=action_dim,
                param=tf.constant_initializer(init_std_param),
                name="output_std_param",
                trainable=learn_std,
            )
            self._forward_std = lambda x: forward_param_layer(x, self.std_params)

        self.std_parametrization = std_parametrization

        if std_parametrization == 'exp':
            min_std_param = np.log(min_std)
        elif std_parametrization == 'softplus':
            min_std_param = np.log(np.exp(min_std) - 1)
        else:
            raise NotImplementedError

        self.min_std_param = min_std_param

        self._dist = DiagonalGaussian(action_dim)

        self._cached_params = {}

        super(GaussianMLPPolicy, self).__init__(env_spec)

        dist_info_sym = self.dist_info_sym(input_tensor, dict(), is_training=False)
        mean_var = dist_info_sym["mean"]
        log_std_var = dist_info_sym["log_std"]

        self._f_dist = tensor_utils.compile_function(
            inputs=[input_tensor],
            outputs=[mean_var, log_std_var],
        )