def __init__(self, name, input_shape, output_dim, hidden_dim,
             hidden_nonlinearity=tf.nn.relu,
             lstm_layer_cls=L.LSTMLayer,
             output_nonlinearity=None, input_var=None, input_layer=None,
             forget_bias=1.0, use_peepholes=False, layer_args=None):
    """Build an LSTM network with two parallel computation paths.

    Path 1 (training / full sequences): input of shape
    (batch, time) + input_shape -> LSTM -> flatten time -> dense output,
    reshaped back to (batch, time, output_dim).

    Path 2 (single step, for recurrent rollouts): one timestep plus the
    previous packed state -> one LSTM step -> dense output.  The step
    output layer reuses W and b from the sequence output layer, so both
    paths share parameters.

    The recurrent state is packed as a single vector of size
    ``hidden_dim * 2``: the first ``hidden_dim`` entries are the hidden
    state, the rest the cell state (see the SliceLayers below).

    :param name: variable-scope name for all layers created here.
    :param input_shape: tuple shape of a single (per-timestep) observation.
    :param output_dim: number of output units.
    :param hidden_dim: number of LSTM units.
    :param hidden_nonlinearity: nonlinearity applied inside the LSTM.
    :param lstm_layer_cls: LSTM layer class to instantiate (must expose
        ``get_step_layer``, ``h0`` and ``c0``).
    :param output_nonlinearity: nonlinearity of the output layers.
    :param input_var: optional TF placeholder for the input.
    :param input_layer: optional pre-built input layer (overrides input_var).
    :param forget_bias: bias added to the LSTM forget gate.
    :param use_peepholes: whether the LSTM uses peephole connections.
    :param layer_args: extra kwargs forwarded to ``lstm_layer_cls``.
    """
    # NOTE(review): unlike the sibling network constructors in this file,
    # this one does not call Serializable.quick_init — confirm the caller
    # or subclass handles serialization.
    with tf.variable_scope(name):
        if input_layer is None:
            # Sequence input: (batch, time) + input_shape.
            l_in = L.InputLayer(shape=(None, None) + input_shape, input_var=input_var, name="input")
        else:
            l_in = input_layer
        # Single-timestep input: (batch,) + input_shape.
        l_step_input = L.InputLayer(shape=(None,) + input_shape, name="step_input")
        # contains previous hidden and cell state
        l_step_prev_state = L.InputLayer(shape=(None, hidden_dim * 2), name="step_prev_state")
        if layer_args is None:
            layer_args = dict()
        # Initial hidden/cell states are fixed (not trainable).
        l_lstm = lstm_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                hidden_init_trainable=False, name="lstm", forget_bias=forget_bias,
                                cell_init_trainable=False, use_peepholes=use_peepholes,
                                **layer_args)
        # Collapse (batch, time, hidden) -> (batch*time, hidden) so a single
        # dense layer can map every timestep to the output space.
        l_lstm_flat = L.ReshapeLayer(
            l_lstm, shape=(-1, hidden_dim),
            name="lstm_flat"
        )
        l_output_flat = L.DenseLayer(
            l_lstm_flat,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output_flat"
        )
        # Un-flatten back to (batch, time, output_dim) using the dynamic
        # batch/time sizes of the original input.
        l_output = L.OpLayer(
            l_output_flat,
            op=lambda flat_output, l_input:
            tf.reshape(flat_output, tf.stack((tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
            shape_op=lambda flat_output_shape, l_input_shape:
            (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
            extras=[l_in],
            name="output"
        )
        # One-step path: advance the LSTM by a single timestep from the
        # packed previous state.
        l_step_state = l_lstm.get_step_layer(l_step_input, l_step_prev_state, name="step_state")
        # Unpack [hidden; cell] from the step state.
        l_step_hidden = L.SliceLayer(l_step_state, indices=slice(hidden_dim), name="step_hidden")
        l_step_cell = L.SliceLayer(l_step_state, indices=slice(hidden_dim, None), name="step_cell")
        # Share W and b with the sequence output layer so both paths
        # compute the same function.
        l_step_output = L.DenseLayer(
            l_step_hidden,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            W=l_output_flat.W,
            b=l_output_flat.b,
            name="step_output"
        )

        self._l_in = l_in
        # Initial-state parameters exposed by the LSTM layer.
        self._hid_init_param = l_lstm.h0
        self._cell_init_param = l_lstm.c0
        self._l_lstm = l_lstm
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_state = l_step_prev_state
        self._l_step_hidden = l_step_hidden
        self._l_step_cell = l_step_cell
        self._l_step_state = l_step_state
        self._l_step_output = l_step_output
        self._hidden_dim = hidden_dim
def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity,
             output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_W_init=L.XavierUniformInitializer(),
             output_b_init=tf.zeros_initializer(),
             input_var=None, input_layer=None, input_shape=None,
             batch_normalization=False, weight_normalization=False,
             latent_dim=0, latent_shape=None, obs_shape=None):
    """Build an MLP whose input is an observation concatenated with a
    latent vector along the last axis.

    The last ``latent_dim`` entries of the input are split off as the
    latent; the rest is the observation.  The observation goes through
    the ``hidden_sizes`` dense stack, the latent through a single dense
    layer, and the two are summed elementwise before the output layer.

    :param name: variable-scope name for all layers created here.
    :param output_dim: number of output units.
    :param hidden_sizes: sizes of the observation-path hidden layers.
    :param hidden_nonlinearity: nonlinearity of hidden layers.
    :param output_nonlinearity: nonlinearity of the output layer.
    :param hidden_W_init / hidden_b_init: hidden-layer initializers.
    :param output_W_init / output_b_init: output-layer initializers.
    :param input_var: optional TF placeholder for the input.
    :param input_layer: optional pre-built input layer (overrides input_var).
    :param input_shape: shape of one full input (obs + latent), required
        when ``input_layer`` is None.
    :param batch_normalization: apply batch norm after each layer.
    :param weight_normalization: apply weight norm inside dense layers.
    :param latent_dim: size of the latent slice at the end of the input.
    :param latent_shape: unused here — presumably kept for the
        commented-out InputLayer variant; verify before removing.
    :param obs_shape: unused here — same caveat as latent_shape.
    """
    Serializable.quick_init(self, locals())

    with tf.variable_scope(name):
        if input_layer is None:
            l_in = L.InputLayer(shape=(None, ) + input_shape, input_var=input_var, name="input")
        else:
            l_in = input_layer
        # latent_in = L.InputLayer(shape=(None,) + latent_shape, input_var=l_in.input_var[:, -latent_dim:], name='latent')
        # obs_in = L.InputLayer(shape=(None,) + obs_shape, input_var=l_in.input_var[:, :-latent_dim], name='obs_input')
        # Split the concatenated input: trailing latent_dim entries are the
        # latent, the rest the observation.
        latent_in = L.SliceLayer(l_in, slice(-latent_dim, None, None), axis=-1)
        obs_in = L.SliceLayer(l_in, slice(0, -latent_dim, None), axis=-1)
        self._layers = [obs_in]
        l_hid = obs_in
        if batch_normalization:
            l_hid = L.batch_norm(l_hid)
        for idx, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                l_hid,
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_%d" % idx,
                W=hidden_W_init,
                b=hidden_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            self._layers.append(l_hid)
        # NOTE(review): `hidden_size` here is the value leaked from the last
        # loop iteration, i.e. the latent is projected to the size of the
        # final hidden layer so the elementwise sum below is well-shaped.
        # The fixed name "hidden_latent_0" suggests this single projection is
        # intentional — confirm against the original (unmangled) source.
        l_latent_out = L.DenseLayer(
            latent_in,
            num_units=hidden_size,
            nonlinearity=hidden_nonlinearity,
            name="hidden_latent_0",
            W=hidden_W_init,
            b=hidden_b_init,
            weight_normalization=weight_normalization
        )
        if batch_normalization:
            l_latent_out = L.batch_norm(l_latent_out)
        self._layers.append(l_latent_out)
        # Merge the two paths by elementwise addition.
        l_hid = L.ElemwiseSumLayer([l_hid, l_latent_out])
        # l_hid = L.OpLayer(
        #     l_hid,
        #     op=lambda l_hid, l_latent:
        #     l_hid + l_latent,
        #     shape_op=lambda l_hid_shape, l_latent_shape:
        #     l_hid_shape,
        #     extras=[l_latent_out],
        #     name='sum_obs_latent')
        l_out = L.DenseLayer(
            l_hid,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output",
            W=output_W_init,
            b=output_b_init,
            weight_normalization=weight_normalization
        )
        if batch_normalization:
            l_out = L.batch_norm(l_out)
        self._layers.append(l_out)
        self._l_in = l_in
        self._l_out = l_out
        # self._input_var = l_in.input_var
        self._output = L.get_output(l_out)
        LayersPowered.__init__(self, l_out)
def __init__(self, name, input_shape, extra_input_shape, output_dim, hidden_sizes,
             conv_filters, conv_filter_sizes, conv_strides, conv_pads,
             extra_hidden_sizes=None,
             hidden_W_init=L.XavierUniformInitializer(),
             hidden_b_init=tf.zeros_initializer(),
             output_W_init=L.XavierUniformInitializer(),
             output_b_init=tf.zeros_initializer(),
             hidden_nonlinearity=tf.nn.relu,
             output_nonlinearity=None,
             input_var=None, input_layer=None):
    """Build a two-path network: a conv stack over an image-like input and
    a dense stack over an "extra" flat input, concatenated and fed through
    joint dense layers to the output.

    Both inputs arrive flattened into one vector per sample; the first
    ``prod(input_shape)`` entries are the conv input, the remainder the
    extra input.

    :param name: variable-scope name for all layers created here.
    :param input_shape: shape of the conv input (before flattening).
    :param extra_input_shape: shape of the extra input (before flattening).
    :param output_dim: number of output units.
    :param hidden_sizes: sizes of the joint dense layers after the merge.
    :param conv_filters / conv_filter_sizes / conv_strides / conv_pads:
        per-layer conv parameters, iterated in lockstep.
    :param extra_hidden_sizes: sizes of the dense layers on the extra path
        (defaults to none).
    :param hidden_W_init / hidden_b_init: hidden-layer initializers.
    :param output_W_init / output_b_init: output-layer initializers.
    :param hidden_nonlinearity: nonlinearity for all hidden layers.
    :param output_nonlinearity: nonlinearity for the output layer.
    :param input_var: optional TF placeholder for the input.
    :param input_layer: optional pre-built input layer (overrides input_var).
    """
    Serializable.quick_init(self, locals())

    if extra_hidden_sizes is None:
        extra_hidden_sizes = []

    with tf.variable_scope(name):
        conv_flat_dim = np.prod(input_shape)
        extra_flat_dim = np.prod(extra_input_shape)

        if input_layer is not None:
            l_in = input_layer
        else:
            l_in = L.InputLayer(
                shape=(None, conv_flat_dim + extra_flat_dim),
                input_var=input_var,
                name="input")

        # Split the flat input into its two halves and restore each shape.
        conv_slice = L.SliceLayer(l_in, indices=slice(conv_flat_dim), name="conv_slice")
        conv_path = L.reshape(conv_slice, ([0], ) + input_shape, name="conv_reshaped")
        extra_slice = L.SliceLayer(l_in, indices=slice(conv_flat_dim, None), name="extra_slice")
        extra_path = L.reshape(extra_slice, ([0], ) + extra_input_shape, name="extra_reshaped")

        # Convolutional stack over the image path.
        for k, (n_filters, f_size, f_stride, f_pad) in enumerate(
                zip(conv_filters, conv_filter_sizes, conv_strides, conv_pads)):
            conv_path = L.Conv2DLayer(
                conv_path,
                num_filters=n_filters,
                filter_size=f_size,
                stride=(f_stride, f_stride),
                pad=f_pad,
                nonlinearity=hidden_nonlinearity,
                name="conv_hidden_%d" % k,
            )

        # Dense stack over the extra path.
        for k, n_units in enumerate(extra_hidden_sizes):
            extra_path = L.DenseLayer(
                extra_path,
                num_units=n_units,
                nonlinearity=hidden_nonlinearity,
                name="extra_hidden_%d" % k,
                W=hidden_W_init,
                b=hidden_b_init,
            )

        # Merge: flatten the conv features and concatenate with the extras.
        merged = L.concat(
            [L.flatten(conv_path, name="conv_hidden_flat"), extra_path],
            name="joint_hidden")

        # Joint dense layers on the merged representation.
        for k, n_units in enumerate(hidden_sizes):
            merged = L.DenseLayer(
                merged,
                num_units=n_units,
                nonlinearity=hidden_nonlinearity,
                name="joint_hidden_%d" % k,
                W=hidden_W_init,
                b=hidden_b_init,
            )

        l_out = L.DenseLayer(
            merged,
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output",
            W=output_W_init,
            b=output_b_init,
        )

        self._l_in = l_in
        self._l_out = l_out

        LayersPowered.__init__(self, [l_out], input_layers=[l_in])
def __init__(self, name, input_dim, output_dim, hidden_sizes,
             hidden_nonlinearity, output_nonlinearity,
             vocab_size, embedding_size,
             hidden_W_init=L.xavier_init, hidden_b_init=tf.zeros_initializer,
             output_W_init=L.xavier_init, output_b_init=tf.zeros_initializer,
             has_other_input=True,
             input_var=None, input_layer=None, **kwargs):
    """Build an MLP over an input whose last ``vocab_size`` entries are a
    bag-of-words, which is embedded and mean-pooled before joining the
    remaining ("other") features.

    When ``has_other_input`` is False the whole input feeds the hidden
    stack directly with no embedding path.

    NOTE(review): layers here receive their name as the second positional
    argument (e.g. ``L.SliceLayer(l_in, "slice_other", ...)``), unlike the
    keyword ``name=`` style used by the other constructors in this file —
    presumably a different layer-API fork; confirm signatures match.
    NOTE(review): the initializer defaults are the uncalled
    ``tf.zeros_initializer`` class (siblings use ``tf.zeros_initializer()``)
    — verify the dense layer accepts an initializer class.

    :param name: variable-scope name for all layers created here.
    :param input_dim: total flat input size (other features + vocab).
    :param output_dim: number of output units.
    :param hidden_sizes: sizes of the hidden dense layers.
    :param hidden_nonlinearity: nonlinearity of hidden layers.
    :param output_nonlinearity: nonlinearity of the output layer.
    :param vocab_size: size of the trailing bag-of-words slice.
    :param embedding_size: dimensionality of the pooled embedding.
    :param hidden_W_init / hidden_b_init: hidden-layer initializers.
    :param output_W_init / output_b_init: output-layer initializers.
    :param has_other_input: whether the input contains non-vocab features.
    :param input_var: optional TF placeholder for the input.
    :param input_layer: optional pre-built input layer (overrides input_var).
    :param kwargs: unused here; accepted for interface compatibility.
    """
    Serializable.quick_init(self, locals())

    with tf.variable_scope(name):
        if input_layer is None:
            input_layer = L.InputLayer(shape=(None, input_dim), input_var=input_var, name="input")
        l_in = input_layer

        if has_other_input:
            # Slice apart: leading features vs trailing vocab counts.
            l_other_in = L.SliceLayer(l_in, "slice_other",
                                      slice(0, input_dim - vocab_size),
                                      axis=-1)
            l_emb_in = L.SliceLayer(l_in, "slice_emb",
                                    slice(input_dim - vocab_size, input_dim),
                                    axis=-1)

            # HACK: This is cheap with small embedding matrices but will not scale well..
            # Find a better way to lookup from this representation + mean-pool
            l_embs = MeanPoolEmbeddingLayer(l_emb_in, "embeddings", embedding_size)

            l_hidden_input = L.ConcatLayer([l_other_in, l_embs], "merge")
        else:
            l_hidden_input = l_in

        hidden_layers = [l_hidden_input]
        for i, hidden_size in enumerate(hidden_sizes):
            l_hid = L.DenseLayer(
                hidden_layers[-1],
                num_units=hidden_size,
                nonlinearity=hidden_nonlinearity,
                name="hidden_%i" % i,
                W=hidden_W_init,
                b=hidden_b_init
            )
            hidden_layers.append(l_hid)
        l_out = L.DenseLayer(
            hidden_layers[-1],
            num_units=output_dim,
            nonlinearity=output_nonlinearity,
            name="output",
            W=output_W_init,
            b=output_b_init
        )

        self.input_layer = l_in
        self.input_var = l_in.input_var
        self.output_layer = l_out

        LayersPowered.__init__(self, l_out)