def __init__( self, name, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False, ):
    """Fully-connected MLP: input -> hidden stack -> dense output.

    All constructed layers are recorded in ``self._layers``; the symbolic
    output of the final layer is cached in ``self._output``.
    """
    Serializable.quick_init(self, locals())

    def wrap_bn(layer):
        # Optionally interpose a batch-norm layer.
        return L.batch_norm(layer) if batch_normalization else layer

    with tf.variable_scope(name):
        if input_layer is not None:
            l_in = input_layer
        else:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
        self._layers = [l_in]

        current = wrap_bn(l_in)
        for i, width in enumerate(hidden_sizes):
            dense = L.DenseLayer(current,
                                 num_units=width,
                                 nonlinearity=hidden_nonlinearity,
                                 name="hidden_%d" % i,
                                 W=hidden_W_init,
                                 b=hidden_b_init,
                                 weight_normalization=weight_normalization)
            current = wrap_bn(dense)
            self._layers.append(current)

        l_out = wrap_bn(L.DenseLayer(current,
                                     num_units=output_dim,
                                     nonlinearity=output_nonlinearity,
                                     name="output",
                                     W=output_W_init,
                                     b=output_b_init,
                                     weight_normalization=weight_normalization))
        self._layers.append(l_out)

        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LayersPowered.__init__(self, l_out)
def create_MLP( self, name, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False, ):
    """Build an MLP graph under ``name`` and return its pieces.

    Returns:
        tuple: ``(all_layers, input_layer, output_layer, input_var, output)``.

    Fix: the bias-initializer defaults previously passed the
    ``tf.zeros_initializer`` class uncalled; they are now instantiated
    (``tf.zeros_initializer()``), consistent with the constructors
    elsewhere in this file.
    """
    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
        else:
            l_in = input_layer
        all_layers = [l_in]
        l_hid = l_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(l_hid,
                                 num_units=hidden_size,
                                 nonlinearity=hidden_nonlinearity,
                                 name="hidden_%d" % idx,
                                 W=hidden_W_init,
                                 b=hidden_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            all_layers.append(l_hid)
        l_out = L.DenseLayer(l_hid,
                             num_units=output_dim,
                             nonlinearity=output_nonlinearity,
                             name="output",
                             W=output_W_init,
                             b=output_b_init,
                             weight_normalization=weight_normalization)
        if batch_normalization:
            l_out = L.batch_norm(l_out)
        all_layers.append(l_out)
        output = L.get_output(l_out)
    # returns layers(), input_layer, output_layer, input_var, output
    return all_layers, l_in, l_out, l_in.input_var, output
def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False, ):
    """Fully-connected MLP: input -> hidden stack -> dense output.

    Layers are recorded in ``self._layers``; the symbolic output of the
    final layer is cached in ``self._output``.

    Fix: the bias-initializer defaults previously passed the
    ``tf.zeros_initializer`` class uncalled; they are now instantiated,
    consistent with the other MLP constructor in this file.
    """
    Serializable.quick_init(self, locals())
    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(shape=(None,) + input_shape,
                                input_var=input_var,
                                name="input")
        else:
            l_in = input_layer
        self._layers = [l_in]
        l_hid = l_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_%d" % idx,
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)
        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output",
            W=output_W_init,
            b=output_b_init,
            weight_normalization=weight_normalization
        )
        if batch_normalization:
            l_out = L.batch_norm(l_out)
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LayersPowered.__init__(self, l_out)
def __init__(self, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, bn=False, dropout=.05):
    """Q-network with dropout after every hidden layer.

    Exposes both a deterministic q-value function (dropout disabled) and a
    stochastic one (dropout active); the action is concatenated into the
    hidden stack at ``action_merge_layer``.
    """
    Serializable.quick_init(self, locals())

    l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
    l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

    n_layers = len(hidden_sizes) + 1
    # Map the (possibly negative) merge index into [0, n_layers).
    action_merge_layer = ((action_merge_layer % n_layers + n_layers) % n_layers
                          if n_layers > 1 else 1)

    net = l_obs
    for depth, width in enumerate(hidden_sizes):
        if bn:
            net = batch_norm(net)
        if depth == action_merge_layer:
            net = L.ConcatLayer([net, l_action])
        net = L.DenseLayer(net,
                           num_units=width,
                           nonlinearity=hidden_nonlinearity,
                           name="h%d" % (depth + 1))
        net = L.DropoutLayer(net, dropout)
    if action_merge_layer == n_layers:
        net = L.ConcatLayer([net, l_action])

    l_output = L.DenseLayer(net,
                            num_units=1,
                            nonlinearity=output_nonlinearity,
                            name="output")

    # deterministic=True disables dropout; deterministic=False keeps it on.
    q_det = L.get_output(l_output, deterministic=True)
    q_drop = L.get_output(l_output, deterministic=False)
    self._f_qval = tensor_utils.compile_function(
        [l_obs.input_var, l_action.input_var], q_det)
    self._f_qval_drop = tensor_utils.compile_function(
        [l_obs.input_var, l_action.input_var], q_drop)

    self._output_layer = l_output
    self._obs_layer = l_obs
    self._action_layer = l_action
    self._output_nonlinearity = output_nonlinearity
    LayersPowered.__init__(self, [l_output])
def __init__( self, env_spec, hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, bn=False):
    """Q-network that concatenates the action into the hidden stack at a
    configurable depth (``action_merge_layer``)."""
    Serializable.quick_init(self, locals())

    l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
    l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

    n_layers = len(hidden_sizes) + 1
    # Map the (possibly negative) merge index into [0, n_layers).
    action_merge_layer = ((action_merge_layer % n_layers + n_layers) % n_layers
                          if n_layers > 1 else 1)

    net = l_obs
    for depth, width in enumerate(hidden_sizes):
        if bn:
            net = batch_norm(net)
        if depth == action_merge_layer:
            net = L.ConcatLayer([net, l_action])
        net = L.DenseLayer(net,
                           num_units=width,
                           nonlinearity=hidden_nonlinearity,
                           name="h%d" % (depth + 1))
    if action_merge_layer == n_layers:
        net = L.ConcatLayer([net, l_action])

    l_output = L.DenseLayer(net,
                            num_units=1,
                            nonlinearity=output_nonlinearity,
                            name="output")
    q_var = L.get_output(l_output, deterministic=True)
    self._f_qval = tensor_utils.compile_function(
        [l_obs.input_var, l_action.input_var], q_var)

    self._output_layer = l_output
    self._obs_layer = l_obs
    self._action_layer = l_action
    self._output_nonlinearity = output_nonlinearity
    LayersPowered.__init__(self, [l_output])
def __init__(self, env_spec, name='qnet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, eqf_use_full_qf=False, eqf_sample_size=1, mqprop=False, bn=False):
    """Continuous-action Q-network built under variable scope ``name``.

    The scalar Q output is flattened to shape (N,). The ``eqf_*`` and
    ``mqprop`` flags are stored for use by other methods of the class.
    """
    Serializable.quick_init(self, locals())
    assert not env_spec.action_space.is_discrete
    self._env_spec = env_spec

    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")

        n_layers = len(hidden_sizes) + 1
        if n_layers == 1:
            action_merge_layer = 1
        else:
            # Map the (possibly negative) merge index into [0, n_layers).
            action_merge_layer = (action_merge_layer % n_layers + n_layers) % n_layers

        net = l_obs
        for depth, width in enumerate(hidden_sizes):
            if bn:
                net = batch_norm(net)
            if depth == action_merge_layer:
                net = L.ConcatLayer([net, l_action])
            net = L.DenseLayer(net,
                               num_units=width,
                               nonlinearity=hidden_nonlinearity,
                               name="h%d" % (depth + 1))
        if action_merge_layer == n_layers:
            net = L.ConcatLayer([net, l_action])

        l_output = L.DenseLayer(net,
                                num_units=1,
                                nonlinearity=output_nonlinearity,
                                name="output")
        # Flatten (N, 1) -> (N,).
        q_var = tf.reshape(L.get_output(l_output, deterministic=True), (-1, ))
        self._f_qval = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], q_var)

        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity
        self.eqf_use_full_qf = eqf_use_full_qf
        self.eqf_sample_size = eqf_sample_size
        self.mqprop = mqprop
        LayersPowered.__init__(self, [l_output])
def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False, latent_dim=0, latent_shape=None, obs_shape=None):
    """MLP whose flat input is split into an observation part and a latent part.

    The trailing ``latent_dim`` features of the input are sliced off as the
    latent vector; the leading features are the observation.  The observation
    passes through the hidden stack, a dense projection of the latent is
    summed into the hidden activations, and a final dense layer produces
    ``output_dim`` units.

    NOTE(review): ``latent_shape`` and ``obs_shape`` are unused by the active
    code path (see the commented-out InputLayer construction) — confirm they
    can be dropped.
    """
    Serializable.quick_init(self, locals())
    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
        else:
            l_in = input_layer
        # latent_in = L.InputLayer(shape=(None,) + latent_shape, input_var=l_in.input_var[:, -latent_dim:], name='latent')
        # obs_in = L.InputLayer(shape=(None,) + obs_shape, input_var=l_in.input_var[:, :-latent_dim], name='obs_input')
        # Split the flat input along its last axis: trailing latent_dim
        # features are the latent, the rest is the observation.
        latent_in = L.SliceLayer(l_in, slice(-latent_dim, None, None), axis=-1)
        obs_in = L.SliceLayer(l_in, slice(0, -latent_dim, None), axis=-1)
        self._layers = [obs_in]
        l_hid = obs_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(l_hid,
                                 num_units=hidden_size,
                                 nonlinearity=hidden_nonlinearity,
                                 name="hidden_%d" % idx,
                                 W=hidden_W_init,
                                 b=hidden_b_init,
                                 weight_normalization=weight_normalization)
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)
        # Project the latent onto the width of the last hidden layer
        # (`hidden_size` holds the final loop value here) and sum it into
        # the hidden activations before the output layer.
        l_latent_out = L.DenseLayer(
            latent_in,
            num_units=hidden_size,
            nonlinearity=hidden_nonlinearity,
            name="hidden_latent_0",
            W=hidden_W_init,
            b=hidden_b_init,
            weight_normalization=weight_normalization)
        if batch_normalization:
            l_latent_out = L.batch_norm(l_latent_out)
        self._layers.append(l_latent_out)
        l_hid = L.ElemwiseSumLayer([l_hid, l_latent_out])
        # l_hid = L.OpLayer(
        #     l_hid,
        #     op=lambda l_hid, l_latent:
        #     l_hid + l_latent,
        #     shape_op=lambda l_hid_shape, l_latent_shape:
        #     l_hid_shape,
        #     extras=[l_latent_out],
        #     name='sum_obs_latent')
        l_out = L.DenseLayer(l_hid,
                             num_units=output_dim,
                             nonlinearity=output_nonlinearity,
                             name="output",
                             W=output_W_init,
                             b=output_b_init,
                             weight_normalization=weight_normalization)
        if batch_normalization:
            l_out = L.batch_norm(l_out)
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LayersPowered.__init__(self, l_out)
def __init__(self, name, input_shape, output_dim, conv_filters, conv_filter_sizes, conv_strides, conv_pads, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
    Serializable.quick_init(self, locals())
    """
    A network composed of several convolution layers followed by some fc layers.
    input_shape: (width,height,channel)
        HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
    conv_filters: a list of numbers of convolution kernel
    conv_filter_sizes: a list of sizes (int) of the convolution kernels
    conv_strides: a list of strides (int) of the conv kernels
    conv_pads: a list of pad formats (either 'SAME' or 'VALID')
    hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
    hidden_sizes: a list of numbers of hidden units for all fc layers
    """
    with tf.variable_scope(name):
        if input_layer is not None:
            l_in = input_layer
            l_hid = l_in
        elif len(input_shape) == 3:
            # Flattened (W, H, C) input: restore the spatial shape.
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            l_hid = L.reshape(l_in, ([0], ) + input_shape, name="reshape_input")
        elif len(input_shape) == 2:
            # 2-D input: prepend a singleton leading dimension before reshape.
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            input_shape = (1, ) + input_shape
            l_hid = L.reshape(l_in, ([0], ) + input_shape, name="reshape_input")
        else:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
            l_hid = l_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        # Convolutional trunk.
        for idx, conv_filter, filter_size, stride, pad in zip(
                range(len(conv_filters)),
                conv_filters,
                conv_filter_sizes,
                conv_strides,
                conv_pads,
        ):
            l_hid = L.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=hidden_nonlinearity,
                name="conv_hidden_%d" % idx,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
        if output_nonlinearity == L.spatial_expected_softmax:
            # Spatial soft-argmax head: no fc layers allowed, and the output
            # must be two coordinates per final conv filter.
            assert len(hidden_sizes) == 0
            assert output_dim == conv_filters[-1] * 2
            l_hid.nonlinearity = tf.identity
            l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
        else:
            l_hid = L.flatten(l_hid, name="conv_flatten")
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                l_out = L.batch_norm(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        LayersPowered.__init__(self, l_out)
def __init__(self, name, input_shape, output_dim, conv_filters, conv_filter_sizes, conv_strides, conv_pads, hidden_sizes, hidden_nonlinearity, output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, batch_normalization=False, weight_normalization=False):
    Serializable.quick_init(self, locals())
    """
    A network composed of several convolution layers followed by some fc layers.
    input_shape: (width,height,channel)
        HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
    conv_filters: a list of numbers of convolution kernel
    conv_filter_sizes: a list of sizes (int) of the convolution kernels
    conv_strides: a list of strides (int) of the conv kernels
    conv_pads: a list of pad formats (either 'SAME' or 'VALID')
    hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
    hidden_sizes: a list of numbers of hidden units for all fc layers

    Fix: the batch-normalization branch in the PG head previously called
    L.batch_norm(fcFor), but `fcFor` is undefined (it only exists in
    commented-out code), raising NameError whenever
    batch_normalization=True. It now normalizes `fc_2`.
    """
    with tf.variable_scope(name):
        # --- input handling: unflatten when the input arrives flat ---
        if input_layer is not None:
            l_in = input_layer
            l_hid = l_in
        elif len(input_shape) == 3:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
        elif len(input_shape) == 2:
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            input_shape = (1,) + input_shape
            l_hid = L.reshape(l_in, ([0],) + input_shape, name="reshape_input")
        else:
            l_in = L.InputLayer(shape=(None,) + input_shape,
                                input_var=input_var,
                                name="input")
            l_hid = l_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        # --- convolutional trunk (layers prefixed "SL_") ---
        for idx, conv_filter, filter_size, stride, pad in zip(range(len(conv_filters)),
                                                              conv_filters,
                                                              conv_filter_sizes,
                                                              conv_strides,
                                                              conv_pads, ):
            l_hid = L.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=hidden_nonlinearity,
                name="SL_conv_hidden_%d" % idx,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
        l_hid = L.flatten(l_hid, name="conv_flatten")
        # Linear feature layer; its activations are exposed via self.actValues.
        critical_layer = L.DenseLayer(
            l_hid,
            num_units=hidden_sizes[0],
            nonlinearity=None,
            name="SL_fc",
            W=hidden_W_init,
            b=hidden_b_init,
            weight_normalization=weight_normalization,
        )
        self.actValues = L.get_output(critical_layer)
        #####Forward pass block#################################
        with tf.variable_scope("PG"):
            fc_1 = L.DenseLayer(
                critical_layer,
                num_units=hidden_sizes[1],
                nonlinearity=hidden_nonlinearity,
                name="pgLayer_1",
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization,
            )
            fc_2 = L.DenseLayer(
                fc_1,
                num_units=hidden_sizes[2],
                nonlinearity=hidden_nonlinearity,
                name="pgLayer_2",
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                # Fixed: was L.batch_norm(fcFor), an undefined name.
                fc_2 = L.batch_norm(fc_2)
            fcOut = L.DenseLayer(
                fc_2,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                fcOut = L.batch_norm(fcOut)
        ###################################################
        # Trainable buffer of activations fed back through the PG head.
        # NOTE(review): the hard-coded width 32 must match hidden_sizes[0]
        # (the width of critical_layer that fc_1 consumes) — confirm.
        self.actVariable = tf.Variable(initial_value=tf.zeros([10000, 32], dtype=tf.float32),
                                       name="act_var1",
                                       trainable=True)
        bcOut = fcOut.get_output_for(fc_2.get_output_for(fc_1.get_output_for(self.actVariable)))
        self.bcOut = bcOut
        backOutLayer = L.InputLayer(shape=(), input_var=bcOut, name="OutputLayer")  # shape is (actVariable[0] , 2)
        self._l_in = l_in
        self.forwardOutLayer = fcOut
        self.backOutLayer = backOutLayer
        outLayers = [fcOut, backOutLayer]
        # self._input_var = l_in.input_var
        LayersPowered.__init__(self, outLayers)
def __init__(self, env_spec, name='nafqnet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=0, output_nonlinearity=None, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=L.ZerosInitializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=L.ZerosInitializer(), bn=False):
    """Normalized-Advantage-Function (NAF) style Q-network.

    Builds a shared trunk up to ``action_merge_layer`` and then three
    separate heads: ``l_L`` (action_dim**2 units, Cholesky factor of P),
    ``l_V`` (scalar state value) and ``l_mu`` (tanh action mean).
    Q(s, a) = A(s, a) + V(s), where the advantage A and the expected Q
    under a Gaussian policy are assembled via the class's ``get_*_sym``
    helpers.
    """
    Serializable.quick_init(self, locals())
    assert not env_spec.action_space.is_discrete
    action_dim = env_spec.action_space.flat_dim
    self._action_dim = action_dim
    self._env_spec = env_spec
    n_layers = len(hidden_sizes)
    # Map the (possibly negative) merge index into [0, n_layers).
    action_merge_layer = \
        (action_merge_layer % n_layers + n_layers) % n_layers
    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
        l_policy_mu = L.InputLayer(shape=(None, action_dim), name="policy_mu")
        l_policy_sigma = L.InputLayer(shape=(None, action_dim, action_dim), name="policy_sigma")
        l_hidden = l_obs
        idx = 0
        l_hidden_kwargs = dict(
            W=hidden_W_init,
            b=hidden_b_init,
            nonlinearity=hidden_nonlinearity,
        )
        l_output_kwargs = dict(
            W=output_W_init,
            b=output_b_init,
        )
        # Shared trunk up to the merge point.
        while idx < action_merge_layer:
            if bn:
                l_hidden = L.batch_norm(l_hidden)
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=hidden_sizes[idx],
                name="h%d" % (idx + 1),
                **l_hidden_kwargs,
            )
            idx += 1
        # Remember the branch point so each head restarts from here.
        _idx = idx
        _l_hidden = l_hidden
        # compute L network
        while idx < n_layers:
            if bn:
                l_hidden = L.batch_norm(l_hidden)
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=hidden_sizes[idx],
                name="L_h%d" % (idx + 1),
                **l_hidden_kwargs,
            )
            idx += 1
        l_L = L.DenseLayer(
            l_hidden,
            num_units=action_dim**2,
            nonlinearity=None,
            name="L_h%d" % (idx + 1),
            **l_output_kwargs,
        )
        # compute V network
        idx = _idx
        l_hidden = _l_hidden
        while idx < n_layers:
            if bn:
                l_hidden = L.batch_norm(l_hidden)
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=hidden_sizes[idx],
                name="V_h%d" % (idx + 1),
                **l_hidden_kwargs,
            )
            idx += 1
        l_V = L.DenseLayer(
            l_hidden,
            num_units=1,
            nonlinearity=None,
            name="V_h%d" % (idx + 1),
            **l_output_kwargs,
        )
        # compute mu network
        idx = _idx
        l_hidden = _l_hidden
        while idx < n_layers:
            if bn:
                l_hidden = L.batch_norm(l_hidden)
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=hidden_sizes[idx],
                name="mu_h%d" % (idx + 1),
                **l_hidden_kwargs,
            )
            idx += 1
        if bn:
            l_hidden = L.batch_norm(l_hidden)
        l_mu = L.DenseLayer(
            l_hidden,
            num_units=action_dim,
            nonlinearity=tf.nn.tanh,
            name="mu_h%d" % (idx + 1),
            **l_output_kwargs,
        )
        L_var, V_var, mu_var = L.get_output([l_L, l_V, l_mu], deterministic=True)
        # Flatten (N, 1) -> (N,).
        V_var = tf.reshape(V_var, (-1, ))
        # compute advantage
        L_mat_var = self.get_L_sym(L_var)
        P_var = self.get_P_sym(L_mat_var)
        A_var = self.get_A_sym(P_var, mu_var, l_action.input_var)
        # compute Q
        Q_var = A_var + V_var
        # compute expected Q under Gaussian policy
        e_A_var = self.get_e_A_sym(P_var, mu_var, l_policy_mu.input_var, l_policy_sigma.input_var)
        e_Q_var = e_A_var + V_var
        self._f_qval = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], Q_var)
        self._f_e_qval = tensor_utils.compile_function([
            l_obs.input_var, l_policy_mu.input_var, l_policy_sigma.input_var
        ], e_Q_var)
        self._L_layer = l_L
        self._V_layer = l_V
        self._mu_layer = l_mu
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._policy_mu_layer = l_policy_mu
        self._policy_sigma_layer = l_policy_sigma
        self._output_nonlinearity = output_nonlinearity
        self.init_policy()
        LayersPowered.__init__(self, [l_L, l_V, l_mu])
def __init__(self, env_spec, name='MLPPhinet', hidden_sizes=(100, 100), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, vs_form=None, bn=False):
    """Phi-network: separate obs/action embeddings summed, then an MLP head.

    Observations are normalized with a running mean/std before embedding.
    When ``vs_form`` is given ('linear' or 'mlp'), an additional
    state-value branch ``vs`` is built from the raw observation and its
    output is added to the network output.
    """
    Serializable.quick_init(self, locals())
    assert not env_spec.action_space.is_discrete
    self._env_spec = env_spec
    self.vs_form = vs_form
    with tf.variable_scope(name):
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        l_obs = L.InputLayer(shape=(None, obs_dim), name="obs")
        l_action = L.InputLayer(shape=(None, action_dim), name="action")
        # Running statistics used to normalize (and clip) the observation.
        self.obs_rms = RunningMeanStd(shape=(obs_dim, ))
        obz = L.NormalizeLayer(l_obs, rms=self.obs_rms, clip_min=-5., clip_max=5.)
        obs_hidden = L.DenseLayer(obz,
                                  num_units=hidden_sizes[0],
                                  nonlinearity=hidden_nonlinearity,
                                  name="obs_h%d" % (0))
        print("hidden sizes...", hidden_sizes[0], hidden_sizes[1:])
        act_hidden = L.DenseLayer(l_action,
                                  num_units=hidden_sizes[0],
                                  nonlinearity=hidden_nonlinearity,
                                  name="act_h%d" % (0))
        # Merge the observation and action streams by elementwise addition.
        merge_hidden = L.OpLayer(obs_hidden,
                                 op=lambda x, y: x + y,
                                 shape_op=lambda x, y: y,
                                 extras=[act_hidden])
        l_hidden = merge_hidden
        for idx, size in enumerate(hidden_sizes[1:]):
            if bn:
                l_hidden = batch_norm(l_hidden)
            l_hidden = L.DenseLayer(l_hidden,
                                    num_units=size,
                                    nonlinearity=hidden_nonlinearity,
                                    name="h%d" % (idx + 1))
        l_output = L.DenseLayer(l_hidden,
                                num_units=1,
                                nonlinearity=output_nonlinearity,
                                name="output")
        if vs_form is not None:
            # Optional state-value branch added onto the main output.
            if vs_form == 'linear':
                vs = L.DenseLayer(l_obs, num_units=1, nonlinearity=None, name='vs')
            elif vs_form == 'mlp':
                vs = L.DenseLayer(l_obs, num_units=64, nonlinearity=tf.nn.relu, name='hidden_vs')
                vs = L.DenseLayer(vs, num_units=1, nonlinearity=None, name='vs')
            else:
                raise NotImplementedError
            output_var = L.get_output(l_output, deterministic=True) + \
                L.get_output(vs, deterministic=True)
            # Flatten (N, 1) -> (N,).
            output_var = tf.reshape(output_var, (-1, ))
        else:
            output_var = L.get_output(l_output, deterministic=True)
            output_var = tf.reshape(output_var, (-1, ))
        self._f_phival = tensor_utils.compile_function(
            inputs=[l_obs.input_var, l_action.input_var],
            outputs=output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self.output_nonlinearity = output_nonlinearity
        if vs_form is not None:
            self._output_vs = vs
            LayersPowered.__init__(self, [l_output, self._output_vs])
        else:
            LayersPowered.__init__(self, [l_output])
def __init__(self, env_spec, name='QuadraticPhinet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, output_nonlinearity=None, vs_form=None, bn=False, A=None, init_a=1.0, a_parameterization='exp'):
    """Quadratic phi-network: an MLP ``fs`` over observations plus a
    (diagonal) A-matrix parameter; the actual phi value is assembled
    symbolically by ``self.get_phival_sym``.

    ``a_parameterization`` selects how the diagonal entries are stored:
    'exp' stores log(a) - 0.5, 'softplus' stores softplus^-1(a).
    """
    Serializable.quick_init(self, locals())
    assert not env_spec.action_space.is_discrete
    self._env_spec = env_spec
    self.vs_form = vs_form
    with tf.variable_scope(name):
        obs_dim = env_spec.observation_space.flat_dim
        action_dim = env_spec.action_space.flat_dim
        l_act = L.InputLayer(shape=(None, action_dim), name="action")
        action_var = l_act.input_var
        l_obs = L.InputLayer(shape=(None, obs_dim), name="obs")
        self.obs_rms = RunningMeanStd(shape=(obs_dim, ))
        # NOTE(review): `obz` is built but never used below — the hidden
        # stack starts from the raw `l_obs`. Confirm whether the normalized
        # input was intended here.
        obz = L.NormalizeLayer(l_obs, rms=self.obs_rms)
        l_hidden = l_obs
        # Final layer of fs outputs action_dim units.
        hidden_sizes += (action_dim, )
        for idx, size in enumerate(hidden_sizes):
            if bn:
                l_hidden = batch_norm(l_hidden)
            l_hidden = L.DenseLayer(l_hidden,
                                    num_units=size,
                                    nonlinearity=hidden_nonlinearity,
                                    name="h%d" % (idx + 1))
        obs_var = l_obs.input_var
        fs = l_hidden  # fs_network.output_layer
        if A is not None:
            # NOTE(review): this branch sets `l_A_param` but not `l_log_A`,
            # so `self._l_log_A = l_log_A` below raises NameError when a
            # precomputed A is supplied — confirm intended behavior.
            l_A_param = A.output_layer
        else:
            if a_parameterization == 'exp':
                init_a_param = np.log(init_a) - .5
            elif a_parameterization == 'softplus':
                init_a_param = np.log(np.exp(init_a) - 1)
            else:
                raise NotImplementedError
            l_log_A = L.ParamLayer(
                l_obs,
                num_units=action_dim,
                param=tf.constant_initializer(init_a_param),
                name="diagonal_a_matrix",
                trainable=True)
        if vs_form is not None:
            raise NotImplementedError
        self._l_log_A = l_log_A
        self.a_parameterization = a_parameterization
        self.fs = fs
        if vs_form is not None:
            self._output_vs = vs
            LayersPowered.__init__(
                self, [self.fs, self._l_log_A, self._output_vs])
        else:
            LayersPowered.__init__(self, [self.fs, self._l_log_A])
        output_var = self.get_phival_sym(obs_var, action_var)
        self._f_phival = tensor_utils.compile_function(
            inputs=[obs_var, action_var],
            outputs=output_var)
def __init__( self, name, output_dim, hidden_sizes, hidden_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False, ):
    """MLP whose raw output is pushed through a masked softmax.

    The mask is sliced from the first ``output_dim`` features of the
    network input itself.
    """
    Serializable.quick_init(self, locals())
    with tf.variable_scope(name):
        if input_layer is not None:
            l_in = input_layer
        else:
            assert input_shape is not None, \
                "input_layer or input_shape must be supplied"
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
        self._layers = [l_in]

        h = l_in
        if batch_normalization:
            h = L.batch_norm(h)
        for i, width in enumerate(hidden_sizes):
            h = L.DenseLayer(h,
                             num_units=width,
                             nonlinearity=hidden_nonlinearity,
                             name="hidden_%d" % i,
                             W=hidden_W_init,
                             b=hidden_b_init,
                             weight_normalization=weight_normalization)
            if batch_normalization:
                h = L.batch_norm(h)
            self._layers.append(h)

        logits = L.DenseLayer(h,
                              num_units=output_dim,
                              name="output",
                              W=output_W_init,
                              b=output_b_init,
                              weight_normalization=weight_normalization)
        if batch_normalization:
            logits = L.batch_norm(logits)
        self._layers.append(logits)

        # mask assumed to occupy first output_dim elements of the input
        mask = L.OpLayer(l_in,
                         lambda X: X[..., :output_dim],
                         shape_op=lambda old_shape: old_shape[:-1] + (output_dim, ))
        self._layers.append(mask)

        l_out = L.OpLayer(logits, masked_softmax, extras=[mask])
        self._layers.append(l_out)

        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LayersPowered.__init__(self, l_out)
def __init__(self, env_spec, name='Phinet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, bn=False):
    """Phi-network: separate obs/action embeddings summed, then an MLP head.

    The first hidden width embeds both streams; the remaining widths form
    the post-merge stack. A commented-out concat-at-depth variant (using
    ``action_merge_layer``) is kept below for reference.
    """
    Serializable.quick_init(self, locals())
    assert not env_spec.action_space.is_discrete
    self._env_spec = env_spec
    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="action")
        n_layers = len(hidden_sizes) + 1
        # Map the (possibly negative) merge index into [0, n_layers);
        # only used by the commented-out concat variant below.
        if n_layers > 1:
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1
        # self.obs_rms = RunningMeanStd(shape=(env_spec.observation_space.flat_dim, ))
        # obz = L.NormalizeLayer(l_obs, rms=self.obs_rms, clip_min=-5., clip_max=5.)
        obz = l_obs
        obs_hidden = L.DenseLayer(obz,
                                  num_units=hidden_sizes[0],
                                  nonlinearity=hidden_nonlinearity,
                                  name="obs_h%d" % (0))
        act_hidden = L.DenseLayer(l_action,
                                  num_units=hidden_sizes[0],
                                  nonlinearity=hidden_nonlinearity,
                                  name="act_h%d" % (0))
        # Merge the observation and action streams by elementwise addition.
        merge_hidden = L.OpLayer(obs_hidden,
                                 op=lambda x, y: x + y,
                                 shape_op=lambda x, y: x,
                                 extras=[act_hidden])
        l_hidden = merge_hidden
        for idx, size in enumerate(hidden_sizes[1:]):
            if bn:
                l_hidden = batch_norm(l_hidden)
            l_hidden = L.DenseLayer(l_hidden,
                                    num_units=size,
                                    nonlinearity=hidden_nonlinearity,
                                    name="h%d" % (idx + 1))
        # for idx, size in enumerate(hidden_sizes):
        #     if bn:
        #         l_hidden = batch_norm(l_hidden)
        #     if idx == action_merge_layer:
        #         l_hidden = L.ConcatLayer([l_hidden, l_action])
        #     l_hidden = L.DenseLayer(
        #         l_hidden,
        #         num_units=size,
        #         nonlinearity=hidden_nonlinearity,
        #         name="h%d" % (idx + 1)
        #     )
        # if action_merge_layer == n_layers:
        #     l_hidden = L.ConcatLayer([l_hidden, l_action])
        l_output = L.DenseLayer(l_hidden,
                                num_units=1,
                                nonlinearity=output_nonlinearity,
                                name="output")
        output_var = L.get_output(l_output, deterministic=True)
        # Flatten (N, 1) -> (N,).
        output_var = tf.reshape(output_var, (-1, ))
        self._f_phival = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self.output_nonlinearity = output_nonlinearity
        LayersPowered.__init__(self, [l_output])
def __init__( self, env_spec, name='qnet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, c=1.0, bn=False):  # c: temperature variable for stochastic policy
    """Two-action Q-network.

    Outputs one Q-value per action head; the Q-value of the taken action
    is selected by masking with the (one-hot, uint8) action input.
    """
    Serializable.quick_init(self, locals())
    # assert env_spec.action_space.is_discrete
    self._n = 2
    self._c = c
    self._env_spec = env_spec
    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, 2), var_type=tf.uint8, name="actions")
        n_layers = len(hidden_sizes) + 1  # kept for parity; unused below

        net = l_obs
        for depth, width in enumerate(hidden_sizes):
            if bn:
                net = L.batch_norm(net)
            net = L.DenseLayer(net,
                               num_units=width,
                               nonlinearity=hidden_nonlinearity,
                               name="h%d" % (depth + 1))

        l_output_vec = L.DenseLayer(net,
                                    num_units=2,
                                    nonlinearity=output_nonlinearity,
                                    name="output")
        q_vec = L.get_output(l_output_vec, deterministic=True)
        # Select the Q-value of the taken action via the one-hot mask.
        q_taken = tf.reduce_sum(q_vec * tf.to_float(l_action.input_var), 1)

        self._f_qval = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], q_taken)
        self._f_qval_vec = tensor_utils.compile_function(
            [l_obs.input_var], q_vec)

        self._output_vec_layer = l_output_vec
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity
        self.init_policy()
        LayersPowered.__init__(self, [l_output_vec])
def __init__( self, env_spec, name='qnet', hidden_sizes=(32, 32), hidden_nonlinearity=tf.nn.relu, action_merge_layer=-2, output_nonlinearity=None, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer(), output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer(), bn=False):
    """Continuous-action Q-network; the action is concatenated into the
    hidden stack at ``action_merge_layer`` and the scalar Q output is
    flattened to shape (N,).

    Fix: the bias-initializer defaults previously passed the
    ``tf.zeros_initializer`` class uncalled; they are now instantiated,
    consistent with the other networks in this file.
    """
    Serializable.quick_init(self, locals())
    with tf.variable_scope(name):
        l_obs = L.InputLayer(shape=(None, env_spec.observation_space.flat_dim), name="obs")
        l_action = L.InputLayer(shape=(None, env_spec.action_space.flat_dim), name="actions")
        n_layers = len(hidden_sizes) + 1
        if n_layers > 1:
            # Map the (possibly negative) merge index into [0, n_layers).
            action_merge_layer = \
                (action_merge_layer % n_layers + n_layers) % n_layers
        else:
            action_merge_layer = 1
        l_hidden = l_obs
        for idx, size in enumerate(hidden_sizes):
            if bn:
                l_hidden = L.batch_norm(l_hidden)
            if idx == action_merge_layer:
                l_hidden = L.ConcatLayer([l_hidden, l_action])
            l_hidden = L.DenseLayer(
                l_hidden,
                num_units=size,
                W=hidden_W_init,
                b=hidden_b_init,
                nonlinearity=hidden_nonlinearity,
                name="h%d" % (idx + 1)
            )
        if action_merge_layer == n_layers:
            l_hidden = L.ConcatLayer([l_hidden, l_action])
        l_output = L.DenseLayer(
            l_hidden,
            num_units=1,
            W=output_W_init,
            b=output_b_init,
            nonlinearity=output_nonlinearity,
            name="output"
        )
        # Flatten (N, 1) -> (N,).
        output_var = tf.reshape(L.get_output(l_output, deterministic=True), (-1,))
        self._f_qval = tensor_utils.compile_function(
            [l_obs.input_var, l_action.input_var], output_var)
        self._output_layer = l_output
        self._obs_layer = l_obs
        self._action_layer = l_action
        self._output_nonlinearity = output_nonlinearity
        LayersPowered.__init__(self, [l_output])
def __init__(self, name, input_shape, output_dim, conv_filters,
             conv_filter_sizes, conv_strides, conv_pads, hidden_sizes,
             hidden_nonlinearity, output_nonlinearity,
             hidden_W_init=L.XavierUniformInitializer(),
             hidden_b_init=tf.zeros_initializer,
             output_W_init=L.XavierUniformInitializer(),
             output_b_init=tf.zeros_initializer,
             input_var=None, input_layer=None,
             batch_normalization=False, weight_normalization=False):
    Serializable.quick_init(self, locals())
    # NOTE(review): the string below follows a statement, so Python does
    # not treat it as the function docstring; it is kept here verbatim.
    """
    A network composed of several convolution layers followed by some fc layers.
    input_shape: (width,height,channel)
        HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
    conv_filters: a list of numbers of convolution kernel
    conv_filter_sizes: a list of sizes (int) of the convolution kernels
    conv_strides: a list of strides (int) of the conv kernels
    conv_pads: a list of pad formats (either 'SAME' or 'VALID')
    hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
    hidden_sizes: a list of numbers of hidden units for all fc layers
    """
    with tf.variable_scope(name):
        # Choose the input layer and, when needed, unflatten it back to an
        # image so the 2-D convolutions can be applied.
        if input_layer is not None:
            # Caller supplied a ready-made input layer; use it directly.
            l_in = input_layer
            l_hid = l_in
        elif len(input_shape) == 3:
            # (width, height, channel): inputs arrive flattened, so reshape
            # each row back to the full image ([0] keeps the batch dim).
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            l_hid = L.reshape(l_in, ([0],) + input_shape,
                              name="reshape_input")
        elif len(input_shape) == 2:
            # 2-D input: prepend a singleton channel dim before reshaping.
            l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                input_var=input_var,
                                name="input")
            input_shape = (1,) + input_shape
            l_hid = L.reshape(l_in, ([0],) + input_shape,
                              name="reshape_input")
        else:
            # Any other rank: pass the shape through unchanged.
            l_in = L.InputLayer(shape=(None,) + input_shape,
                                input_var=input_var,
                                name="input")
            l_hid = l_in

        if batch_normalization:
            l_hid = L.batch_norm(l_hid)

        # Stack of convolution layers; stride is applied symmetrically in
        # both spatial dimensions.
        for idx, conv_filter, filter_size, stride, pad in zip(
                range(len(conv_filters)),
                conv_filters,
                conv_filter_sizes,
                conv_strides,
                conv_pads,
        ):
            l_hid = L.Conv2DLayer(
                l_hid,
                num_filters=conv_filter,
                filter_size=filter_size,
                stride=(stride, stride),
                pad=pad,
                nonlinearity=hidden_nonlinearity,
                name="conv_hidden_%d" % idx,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)

        if output_nonlinearity == L.spatial_expected_softmax:
            # Spatial soft-argmax head: no fc layers allowed, and the
            # output is an (x, y) coordinate pair per final conv filter.
            assert len(hidden_sizes) == 0
            assert output_dim == conv_filters[-1] * 2
            # The softmax consumes raw activations, so strip the last
            # conv layer's nonlinearity.
            l_hid.nonlinearity = tf.identity
            l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
        else:
            # Standard head: flatten the conv features, then fc layers.
            l_hid = L.flatten(l_hid, name="conv_flatten")
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization,
            )
            if batch_normalization:
                l_out = L.batch_norm(l_out)

        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var

    LayersPowered.__init__(self, l_out)
def __init__(
        self,
        name,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        dropout_prob,
        output_nonlinearity,
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer(),
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer(),
        input_var=None,
        input_layer=None,
        input_shape=None,
        batch_normalization=False,
        weight_normalization=False,
):
    """MLP with dropout on the input and on the last hidden layer.

    Identical in structure to the plain MLP constructor, except that a
    ``DropoutLayer`` is inserted (a) between the input layer and the first
    hidden layer and (b) between the last hidden layer and the output
    layer.  Intermediate hidden layers are NOT individually dropped out.

    :param name: variable scope wrapping all layers.
    :param output_dim: number of output units.
    :param hidden_sizes: number of units in each hidden dense layer.
    :param hidden_nonlinearity: activation for the hidden layers.
    :param dropout_prob: drop probability for both dropout layers.
    :param output_nonlinearity: activation on the output (None = linear).
    :param input_var: optional existing placeholder for the input.
    :param input_layer: optional existing layer to build on top of; when
        given, ``input_shape``/``input_var`` are ignored.
    :param input_shape: shape of a single input sample (batch dim excluded);
        required when ``input_layer`` is None.
    :param batch_normalization: apply batch norm after each dense layer.
    :param weight_normalization: apply weight norm inside each dense layer.
    """
    Serializable.quick_init(self, locals())

    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape,
                                input_var=input_var,
                                name="input")
        else:
            l_in = input_layer
        self._layers = [l_in]

        # Dropout on the raw input features.
        l_hid = L.DropoutLayer(l_in, p=dropout_prob)
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)

        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_%d" % idx,
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization)
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)

        # Dropout between the last hidden layer and the output layer.
        l_hid_dropout = L.DropoutLayer(l_hid, p=dropout_prob)
        l_out = L.DenseLayer(
            l_hid_dropout,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output",
            W=output_W_init,
            b=output_b_init,
            weight_normalization=weight_normalization)
        if batch_normalization:
            l_out = L.batch_norm(l_out)
        self._layers.append(l_out)

        self._l_in = l_in
        self._l_out = l_out
        self._output = L.get_output(l_out)

        LayersPowered.__init__(self, l_out)