Example #1
    def __init__(self, name, input_shape, output_dim, hidden_dim, hidden_nonlinearity=tf.nn.relu,
                 gru_layer_cls=L.GRULayer,
                 output_nonlinearity=None, input_var=None, input_layer=None, layer_args=None):
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(
                    shape=(None, None) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            l_step_input = L.InputLayer(
                shape=(None,) + input_shape, name="step_input")
            l_step_prev_state = L.InputLayer(
                shape=(None, hidden_dim), name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_gru = gru_layer_cls(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity,
                                  hidden_init_trainable=False, name="gru", **layer_args)
            l_gru_flat = L.ReshapeLayer(
                l_gru, shape=(-1, hidden_dim),
                name="gru_flat"
            )
            l_output_flat = L.DenseLayer(
                l_gru_flat,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output_flat"
            )
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input:
                tf.reshape(flat_output, tf.stack(
                    (tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output"
            )
            l_step_state = l_gru.get_step_layer(
                l_step_input, l_step_prev_state, name="step_state")
            # for a GRU the step state is just the hidden state, so no slicing is needed
            l_step_hidden = l_step_state
            l_step_output = L.DenseLayer(
                l_step_hidden,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                W=l_output_flat.W,
                b=l_output_flat.b,
                name="step_output"
            )

            self._l_in = l_in
            self._hid_init_param = l_gru.h0
            self._l_gru = l_gru
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
Example #2
    def __init__(self, name, output_dim, hidden_sizes, hidden_nonlinearity,
                 output_nonlinearity, hidden_W_init=L.XavierUniformInitializer(), hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(), output_b_init=tf.zeros_initializer, batch_size=None,
                 input_var=None, input_layer=None, input_shape=None, batch_normalization=False, weight_normalization=False,
                 ):
        Serializable.quick_init(self, locals())
        self.name = name

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(
                    shape=(batch_size,) + input_shape, input_var=input_var, name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]
            l_hid = l_in
            if batch_normalization:
                ls = L.batch_norm(l_hid)
                l_hid = ls[-1]
                self._layers += ls
            for idx, hidden_size in enumerate(hidden_sizes):
                l_hid = L.DenseLayer(
                    l_hid,
                    num_units=hidden_size,
                    nonlinearity=hidden_nonlinearity,
                    name="hidden_%d" % idx,
                    W=hidden_W_init,
                    b=hidden_b_init,
                    weight_normalization=weight_normalization
                )
                if batch_normalization:
                    ls = L.batch_norm(l_hid)
                    l_hid = ls[-1]
                    self._layers += ls
                self._layers.append(l_hid)
            l_out = L.DenseLayer(
                l_hid,
                num_units=output_dim,
                nonlinearity=output_nonlinearity,
                name="output",
                W=output_W_init,
                b=output_b_init,
                weight_normalization=weight_normalization
            )
            if batch_normalization:
                ls = L.batch_norm(l_out)
                l_out = ls[-1]
                self._layers += ls
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(
                shape=(batch_size,) + (output_dim,), input_var=input_var, name="target")

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
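
A minimal sketch of how this constructor might be called, assuming the snippet is the __init__ of an MLP-style class; the class name `MLP` and all parameter values below are illustrative assumptions, with `L` and `tf` being the same layers module and TensorFlow 1.x API used throughout these examples.

# Hypothetical usage sketch -- the class name `MLP` and every value are assumptions.
mlp = MLP(
    name="value_net",
    output_dim=1,                     # single scalar output
    hidden_sizes=(64, 64),            # two hidden layers with 64 units each
    hidden_nonlinearity=tf.nn.relu,
    output_nonlinearity=None,         # linear output layer
    input_shape=(10,),                # flat 10-dimensional input vector
)
# The constructor caches the symbolic output of the top layer in mlp._output,
# which is equivalent to L.get_output(mlp._l_out).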
Example #3
def feedforward(l_hid,
                hidden_sizes,
                hidden_nonlinearity,
                weight_normalization=False,
                hidden_W_init=L.XavierUniformInitializer(),
                hidden_b_init=tf.zeros_initializer,
                linear_output=False,
                start_idx=0):
    for idx, hidden_size in enumerate(hidden_sizes):
        if linear_output and (idx == (len(hidden_sizes) - 1)):
            nonlin = None
        else:
            nonlin = hidden_nonlinearity
        l_hid = L.DenseLayer(l_hid,
                             num_units=hidden_size,
                             nonlinearity=nonlin,
                             name="hidden_%d" % (idx + start_idx),
                             W=hidden_W_init,
                             b=hidden_b_init,
                             weight_normalization=weight_normalization)
    return l_hid
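
A short usage sketch of this helper, assuming `L` is the same layers module and `tf` is TensorFlow 1.x as in the other examples; the input width and layer sizes are illustrative assumptions.

# Illustrative sketch: stack two hidden DenseLayers on top of an input layer.
l_in = L.InputLayer(shape=(None, 8), name="input")
l_top = feedforward(l_in,
                    hidden_sizes=(32, 32),
                    hidden_nonlinearity=tf.nn.tanh,
                    linear_output=False)  # keep the nonlinearity on the last layer
# l_top is the topmost DenseLayer; L.get_output(l_top) gives its symbolic output.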
Example #4
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 hidden_dim,
                 hidden_nonlinearity=tf.nn.relu,
                 lstm_layer_cls=L.LSTMLayer,
                 output_nonlinearity=None,
                 input_var=None,
                 input_layer=None,
                 forget_bias=1.0,
                 use_peepholes=False,
                 layer_args=None):
        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(None, None) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            l_step_input = L.InputLayer(shape=(None, ) + input_shape,
                                        name="step_input")
            # contains previous hidden and cell state
            l_step_prev_state = L.InputLayer(shape=(None, hidden_dim * 2),
                                             name="step_prev_state")
            if layer_args is None:
                layer_args = dict()
            l_lstm = lstm_layer_cls(l_in,
                                    num_units=hidden_dim,
                                    hidden_nonlinearity=hidden_nonlinearity,
                                    hidden_init_trainable=False,
                                    name="lstm",
                                    forget_bias=forget_bias,
                                    cell_init_trainable=False,
                                    use_peepholes=use_peepholes,
                                    **layer_args)
            l_lstm_flat = L.ReshapeLayer(l_lstm,
                                         shape=(-1, hidden_dim),
                                         name="lstm_flat")
            l_output_flat = L.DenseLayer(l_lstm_flat,
                                         num_units=output_dim,
                                         nonlinearity=output_nonlinearity,
                                         name="output_flat")
            l_output = L.OpLayer(
                l_output_flat,
                op=lambda flat_output, l_input: tf.reshape(
                    flat_output,
                    tf.stack(
                        (tf.shape(l_input)[0], tf.shape(l_input)[1], -1))),
                shape_op=lambda flat_output_shape, l_input_shape:
                (l_input_shape[0], l_input_shape[1], flat_output_shape[-1]),
                extras=[l_in],
                name="output")
            l_step_state = l_lstm.get_step_layer(l_step_input,
                                                 l_step_prev_state,
                                                 name="step_state")
            l_step_hidden = L.SliceLayer(l_step_state,
                                         indices=slice(hidden_dim),
                                         name="step_hidden")
            l_step_cell = L.SliceLayer(l_step_state,
                                       indices=slice(hidden_dim, None),
                                       name="step_cell")
            l_step_output = L.DenseLayer(l_step_hidden,
                                         num_units=output_dim,
                                         nonlinearity=output_nonlinearity,
                                         W=l_output_flat.W,
                                         b=l_output_flat.b,
                                         name="step_output")

            self._l_in = l_in
            self._hid_init_param = l_lstm.h0
            self._cell_init_param = l_lstm.c0
            self._l_lstm = l_lstm
            self._l_out = l_output
            self._l_step_input = l_step_input
            self._l_step_prev_state = l_step_prev_state
            self._l_step_hidden = l_step_hidden
            self._l_step_cell = l_step_cell
            self._l_step_state = l_step_state
            self._l_step_output = l_step_output
            self._hidden_dim = hidden_dim
Example #5
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 conv_filters,
                 conv_filter_sizes,
                 conv_strides,
                 conv_pads,
                 hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer,
                 input_var=None,
                 input_layer=None,
                 batch_normalization=False,
                 weight_normalization=False):
        Serializable.quick_init(self, locals())
        """
        A network composed of several convolution layers followed by some fc layers.
        input_shape: (width,height,channel)
            HOWEVER, network inputs are assumed flattened. This network will first unflatten the inputs and then apply the standard convolutions and so on.
        conv_filters: a list of numbers of convolution kernel
        conv_filter_sizes: a list of sizes (int) of the convolution kernels
        conv_strides: a list of strides (int) of the conv kernels
        conv_pads: a list of pad formats (either 'SAME' or 'VALID')
        hidden_nonlinearity: a nonlinearity from tf.nn, shared by all conv and fc layers
        hidden_sizes: a list of numbers of hidden units for all fc layers
        """
        with tf.variable_scope(name):
            if input_layer is not None:
                l_in = input_layer
                l_hid = l_in
            elif len(input_shape) == 3:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            elif len(input_shape) == 2:
                l_in = L.InputLayer(shape=(None, np.prod(input_shape)),
                                    input_var=input_var,
                                    name="input")
                input_shape = (1, ) + input_shape
                l_hid = L.reshape(l_in, ([0], ) + input_shape,
                                  name="reshape_input")
            else:
                l_in = L.InputLayer(shape=(None, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
                l_hid = l_in

            if batch_normalization:
                l_hid = L.batch_norm(l_hid)
            for idx, conv_filter, filter_size, stride, pad in zip(
                    range(len(conv_filters)),
                    conv_filters,
                    conv_filter_sizes,
                    conv_strides,
                    conv_pads,
            ):
                l_hid = L.Conv2DLayer(
                    l_hid,
                    num_filters=conv_filter,
                    filter_size=filter_size,
                    stride=(stride, stride),
                    pad=pad,
                    nonlinearity=hidden_nonlinearity,
                    name="conv_hidden_%d" % idx,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_hid = L.batch_norm(l_hid)

            if output_nonlinearity == L.spatial_expected_softmax:
                assert len(hidden_sizes) == 0
                assert output_dim == conv_filters[-1] * 2
                l_hid.nonlinearity = tf.identity
                l_out = L.SpatialExpectedSoftmaxLayer(l_hid)
            else:
                l_hid = L.flatten(l_hid, name="conv_flatten")
                for idx, hidden_size in enumerate(hidden_sizes):
                    l_hid = L.DenseLayer(
                        l_hid,
                        num_units=hidden_size,
                        nonlinearity=hidden_nonlinearity,
                        name="hidden_%d" % idx,
                        W=hidden_W_init,
                        b=hidden_b_init,
                        weight_normalization=weight_normalization,
                    )
                    if batch_normalization:
                        l_hid = L.batch_norm(l_hid)
                l_out = L.DenseLayer(
                    l_hid,
                    num_units=output_dim,
                    nonlinearity=output_nonlinearity,
                    name="output",
                    W=output_W_init,
                    b=output_b_init,
                    weight_normalization=weight_normalization,
                )
                if batch_normalization:
                    l_out = L.batch_norm(l_out)
            self._l_in = l_in
            self._l_out = l_out
            # self._input_var = l_in.input_var

        LayersPowered.__init__(self, l_out)
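
To illustrate the docstring above, a hedged instantiation sketch, assuming the snippet is the __init__ of a ConvNetwork-style class; the class name `ConvNetwork` and all parameter values are assumptions chosen only to match the documented argument shapes.

# Hypothetical sketch: two conv layers plus one fc layer on flattened 32x32 RGB inputs.
conv_net = ConvNetwork(
    name="conv_net",
    input_shape=(32, 32, 3),          # (width, height, channel); inputs arrive flattened
    output_dim=4,
    conv_filters=(16, 32),            # number of filters per conv layer
    conv_filter_sizes=(5, 3),         # kernel sizes per conv layer
    conv_strides=(2, 2),
    conv_pads=("SAME", "SAME"),
    hidden_sizes=(128,),              # one fc layer after the conv stack
    hidden_nonlinearity=tf.nn.relu,
    output_nonlinearity=tf.nn.softmax,
)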
Example #6
    def __init__(self,
                 name,
                 input_shape,
                 output_dim,
                 z_dim,
                 pre_hidden_sizes,
                 post_hidden_sizes,
                 hidden_nonlinearity,
                 output_nonlinearity,
                 hidden_W_init=L.XavierUniformInitializer(),
                 hidden_b_init=tf.zeros_initializer,
                 output_W_init=L.XavierUniformInitializer(),
                 output_b_init=tf.zeros_initializer,
                 batch_size=None,
                 input_var=None,
                 input_layer=None,
                 weight_normalization=False):

        Serializable.quick_init(self, locals())
        self.name = name

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(batch_size, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            # construct graph
            l_hid = feedforward(l_in,
                                pre_hidden_sizes,
                                hidden_nonlinearity,
                                hidden_W_init=hidden_W_init,
                                hidden_b_init=hidden_b_init,
                                weight_normalization=weight_normalization,
                                start_idx=0)
            l_lat = L.LatentLayer(l_hid, z_dim)
            l_hid = feedforward(l_lat,
                                post_hidden_sizes,
                                hidden_nonlinearity,
                                hidden_W_init=hidden_W_init,
                                hidden_b_init=hidden_b_init,
                                weight_normalization=weight_normalization,
                                start_idx=len(pre_hidden_sizes))

            # create output layer
            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)

            self._layers.append(l_out)
            self._l_lat = l_lat
            self._z_dim = z_dim
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(shape=(batch_size, ) + (output_dim, ),
                                       input_var=input_var,
                                       name="target")

            # complexity loss for variational posterior
            z_mu, z_sig = self._l_lat.get_dparams_for(
                L.get_output(self._l_lat.input_layer))
            self.kl_cost = kl_from_prior(z_mu, z_sig, self._z_dim)

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)
Example #7
    def __init__(
        self,
        name,
        input_shape,
        output_dim,
        hidden_sizes,
        hidden_nonlinearity,
        output_nonlinearity,
        z_dim,
        z_idx,
        z_hidden_sizes,
        merge="mul",
        hidden_W_init=L.XavierUniformInitializer(),
        hidden_b_init=tf.zeros_initializer,
        output_W_init=L.XavierUniformInitializer(),
        output_b_init=tf.zeros_initializer,
        batch_size=None,
        input_var=None,
        input_layer=None,
        batch_normalization=False,
        weight_normalization=False,
    ):

        Serializable.quick_init(self, locals())
        self.name = name

        total_dim = np.prod(input_shape)

        with tf.variable_scope(name):
            if input_layer is None:
                l_in = L.InputLayer(shape=(batch_size, ) + input_shape,
                                    input_var=input_var,
                                    name="input")
            else:
                l_in = input_layer
            self._layers = [l_in]

            # slice off features / observation
            l_feat = L.SliceLayer(l_in,
                                  indices=slice(0, total_dim - z_dim),
                                  name="l_feat")

            # slice off z "style" variable
            l_z = L.SliceLayer(l_in,
                               indices=slice(total_dim - z_dim, total_dim),
                               name="l_z")

            l_pre = feedforward(l_feat,
                                hidden_sizes[:z_idx],
                                hidden_nonlinearity,
                                linear_output=True)
            with tf.variable_scope("z"):
                # if merging mul, ensure dimensionalities match.
                if merge == "mul":
                    _head = [total_dim] + hidden_sizes
                    _head = [_head[z_idx]]
                elif merge == "concat":
                    _head = []
                l_z = feedforward(l_z,
                                  z_hidden_sizes + _head,
                                  hidden_nonlinearity,
                                  linear_output=True)

            # merge latent code with features
            if merge == "mul":
                l_merge = L.ElemwiseMulLayer([l_pre, l_z])
            elif merge == "concat":
                l_merge = L.ConcatLayer([l_pre, l_z], axis=1)
            else:
                raise NotImplementedError

            if z_idx > 0:
                l_merge = L.NonlinearityLayer(l_merge, hidden_nonlinearity)
            l_hid = feedforward(l_merge,
                                hidden_sizes[z_idx:],
                                hidden_nonlinearity,
                                start_idx=z_idx)

            l_out = L.DenseLayer(l_hid,
                                 num_units=output_dim,
                                 nonlinearity=output_nonlinearity,
                                 name="output",
                                 W=output_W_init,
                                 b=output_b_init,
                                 weight_normalization=weight_normalization)
            #if batch_normalization:
            #    ls = L.batch_norm(l_out)
            #    l_out = ls[-1]
            #    self._layers += ls
            self._layers.append(l_out)
            self._l_in = l_in
            self._l_out = l_out
            self._l_tar = L.InputLayer(shape=(batch_size, ) + (output_dim, ),
                                       input_var=input_var,
                                       name="target")

            # self._input_var = l_in.input_var
            self._output = L.get_output(l_out)

            LayersPowered.__init__(self, l_out)