Example #1
    def _build(self, incoming, state, *args, **kwargs):
        """Gated recurrent unit (GRU) with nunits cells."""
        with get_variable_scope('Gates'):  # Reset gate and update gate.
            weights_init = getters.get_initializer(self.weights_init)
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(axis=1,
                                   num_or_size_splits=2,
                                   value=_linear([incoming, state],
                                                 2 * self._num_units, True,
                                                 1.0, weights_init,
                                                 self.trainable, self.restore))
            inner_activation = getters.get_activation(self.inner_activation)
            r, u = inner_activation(r), inner_activation(u)
        with get_variable_scope('Candidate'):
            activation = getters.get_activation(self.activation)
            c = activation(
                _linear([incoming, r * state], self._num_units, True, 0.,
                        weights_init, self.trainable, self.restore))
        new_h = u * state + (1 - u) * c

        self._w, self._b = list(), list()
        # Retrieve RNN Variables
        with get_variable_scope(scope='Gates/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
        with get_variable_scope(scope='Candidate/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))

        return new_h, new_h
Example #2
    def _build(self, incoming, state, *args, **kwargs):
        """Gated recurrent unit (GRU) with nunits cells."""
        with get_variable_scope('Gates'):  # Reset gate and update gate.
            weights_init = getters.get_initializer(self.weights_init)
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(
                axis=1, num_or_size_splits=2,
                value=_linear([incoming, state], 2 * self._num_units, True, 1.0,
                              weights_init, self.trainable, self.restore))
            inner_activation = getters.get_activation(self.inner_activation)
            r, u = inner_activation(r), inner_activation(u)
        with get_variable_scope('Candidate'):
            activation = getters.get_activation(self.activation)
            c = activation(
                _linear([incoming, r * state], self._num_units, True, 0.,
                        weights_init, self.trainable, self.restore))
        new_h = u * state + (1 - u) * c

        self._w, self._b = list(), list()
        # Retrieve RNN Variables
        with get_variable_scope(scope='Gates/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
        with get_variable_scope(scope='Candidate/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))

        return new_h, new_h
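
For reference, both GRU examples above compute the same update. Writing x_t for incoming, h_{t-1} for state, \sigma for inner_activation and \phi for activation, the cell is (the gate bias starts at 1.0 so the cell initially neither resets nor updates):

    r_t = \sigma(W_r [x_t, h_{t-1}] + b_r), \qquad u_t = \sigma(W_u [x_t, h_{t-1}] + b_u)
    \tilde{h}_t = \phi(W_c [x_t,\; r_t \odot h_{t-1}] + b_c)
    h_t = u_t \odot h_{t-1} + (1 - u_t) \odot \tilde{h}_t
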
Example #3
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              unit_forget_bias=True,
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              return_sequences=False,
              go_backwards=False,
              stateful=False,
              dropout=0.,
              recurrent_dropout=0.,
              **kwargs):
     super(ConvLSTM2D, self).__init__(
         filters,
         kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=getters.get_activation(activation),
         recurrent_activation=getters.get_activation(recurrent_activation),
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         recurrent_initializer=getters.get_initializer(
             recurrent_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         unit_forget_bias=unit_forget_bias,
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         recurrent_regularizer=getters.get_regularizer(
             recurrent_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         recurrent_constraint=getters.get_constraint(recurrent_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         return_sequences=return_sequences,
         go_backwards=go_backwards,
         stateful=stateful,
         dropout=dropout,
         recurrent_dropout=recurrent_dropout,
         **kwargs)
Example #4
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Conv2D, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=getters.get_activation(activation) if activation else activation,
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)
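
The wrapper constructors in examples #3 and #4 (and the similar Dense, LSTM and SeparableConv2D wrappers further down) all follow one idiom: string identifiers, callables, or None are resolved into callables by the getters module before being handed to the parent layer. Below is a minimal sketch of that resolution pattern; the _ACTIVATIONS registry and this get_activation are illustrative assumptions, not polyaxon's actual implementation.

import tensorflow as tf

# Hypothetical registry mapping string identifiers to activation callables.
_ACTIVATIONS = {
    'tanh': tf.nn.tanh,
    'sigmoid': tf.nn.sigmoid,
    'relu': tf.nn.relu,
}

def get_activation(identifier):
    """Resolve a string name or callable into an activation callable."""
    if identifier is None or callable(identifier):
        return identifier  # pass None and callables through unchanged
    try:
        return _ACTIVATIONS[identifier]
    except KeyError:
        raise ValueError('Unknown activation: {}'.format(identifier))

# Usage, mirroring the constructors above:
act = get_activation('relu')  # resolves to tf.nn.relu
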
Example #5
    def _build(self, incoming, state, *args, **kwargs):
        """Long short-term memory cell (LSTM)."""
        self._declare_dependencies()
        activation = getters.get_activation(self.activation)
        inner_activation = getters.get_activation(self.inner_activation)
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(axis=1, num_or_size_splits=2, value=state)
        concat = _linear([incoming, h], 4 * self._num_units, True, 0.,
                         self.weights_init, self.trainable, self.restore)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(axis=1,
                                     num_or_size_splits=4,
                                     value=concat)

        # apply batch normalization to inner state and gates
        if self.batch_norm:
            i = self._batch_norm_i(i)
            j = self._batch_norm_j(j)
            f = self._batch_norm_f(f)
            o = self._batch_norm_o(o)

        new_c = (c * inner_activation(f + self._forget_bias) +
                 inner_activation(i) * activation(j))

        # hidden-to-hidden batch normalization
        if self.batch_norm:
            batch_norm_new_c = self._batch_norm_c(new_c)
            new_h = activation(batch_norm_new_c) * inner_activation(o)
        else:
            new_h = activation(new_c) * inner_activation(o)

        if self._state_is_tuple:
            new_state = rnn.LSTMStateTuple(new_c, new_h)
        else:
            new_state = tf.concat(values=[new_c, new_h], axis=1)

        # Retrieve RNN Variables
        with get_variable_scope(scope='Linear', reuse=True):
            self._w = tf.get_variable('w')
            self._b = tf.get_variable('b')

        return new_h, new_state
Example #6
    def _build(self, incoming, state, *args, **kwargs):
        """Long short-term memory cell (LSTM)."""
        self._declare_dependencies()
        activation = getters.get_activation(self.activation)
        inner_activation = getters.get_activation(self.inner_activation)
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(axis=1, num_or_size_splits=2, value=state)
        concat = _linear(
            [incoming, h], 4 * self._num_units, True, 0., self.weights_init,
            self.trainable, self.restore)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(axis=1, num_or_size_splits=4, value=concat)

        # apply batch normalization to inner state and gates
        if self.batch_norm:
            i = self._batch_norm_i(i)
            j = self._batch_norm_j(j)
            f = self._batch_norm_f(f)
            o = self._batch_norm_o(o)

        new_c = (c * inner_activation(f + self._forget_bias) + inner_activation(i) * activation(j))

        # hidden-to-hidden batch normalization
        if self.batch_norm:
            batch_norm_new_c = self._batch_norm_c(new_c)
            new_h = activation(batch_norm_new_c) * inner_activation(o)
        else:
            new_h = activation(new_c) * inner_activation(o)

        if self._state_is_tuple:
            new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
        else:
            new_state = tf.concat(values=[new_c, new_h], axis=1)

        # Retrieve RNN Variables
        with get_variable_scope(scope='Linear', reuse=True):
            self._w = tf.get_variable('w')
            self._b = tf.get_variable('b')

        return new_h, new_state
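
Ignoring the optional batch normalization, both LSTM examples above compute, with \sigma = inner_activation, \phi = activation and b_f = forget_bias:

    [i_t, j_t, f_t, o_t] = W [x_t, h_{t-1}] + b
    c_t = c_{t-1} \odot \sigma(f_t + b_f) + \sigma(i_t) \odot \phi(j_t)
    h_t = \phi(c_t) \odot \sigma(o_t)

When batch_norm is set, each gate pre-activation i_t, j_t, f_t, o_t and the new cell state c_t are batch-normalized before use.
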
Example #7
File: core.py  Project: chandu088/p
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2-D, the input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        incoming = validate_dtype(incoming)

        assert len(
            input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer,
                                              scale=self.scale,
                                              collect=True)
        self._w = variable(name='w',
                           shape=[n_inputs, self.num_units],
                           dtype=incoming.dtype,
                           regularizer=regularizer,
                           initializer=getters.get_initializer(
                               self.weights_init),
                           trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(tensor=inference, shape=[-1, n_inputs])
        inference = tf.matmul(a=inference, b=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b',
                               shape=[self.num_units],
                               dtype=incoming.dtype,
                               initializer=getters.get_initializer(
                                   self.bias_init),
                               trainable=self.trainable,
                               restore=self.restore)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
            inference = tf.nn.bias_add(value=inference, bias=self._b)

        if self.activation:
            inference = getters.get_activation(self.activation,
                                               collect=True)(inference)

        if self._dropout:
            inference = self._dropout(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
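
Put compactly, the fully connected example above flattens the input to 2-D if needed and computes

    y = \mathrm{dropout}(\phi(x W + b))

where the bias is added only when self.bias is set, \phi is applied only when self.activation is set, and dropout only when self._dropout is configured.
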
Example #8
    def _build(self, inputs, state, *args, **kwargs):
        """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
        weights_init = getters.get_initializer(self.weights_init)
        output = getters.get_activation(self.activation)(
            _linear([inputs, state], self.num_units, True, 0., weights_init,
                    self.trainable, self.restore))
        # Retrieve RNN Variables
        with get_variable_scope(name='Linear', reuse=True):
            self._w = tf.get_variable(name='w')
            self._b = tf.get_variable(name='b')

        return output, output
Example #9
    def _build(self, inputs, state, *args, **kwargs):
        """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
        weights_init = getters.get_initializer(self.weights_init)
        output = getters.get_activation(self.activation)(
            _linear([inputs, state], self.num_units, True, 0.,
                    weights_init, self.trainable, self.restore))
        # Retrieve RNN Variables
        with get_variable_scope(name='Linear', reuse=True):
            self._w = tf.get_variable(name='w')
            self._b = tf.get_variable(name='b')

        return output, output
Example #10
 def __init__(self,
              units,
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              unit_forget_bias=True,
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              dropout=0.,
              recurrent_dropout=0.,
              **kwargs):
     super(LSTM, self).__init__(
         units=units,
         activation=getters.get_activation(activation),
         recurrent_activation=getters.get_activation(recurrent_activation),
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         recurrent_initializer=getters.get_initializer(
             recurrent_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         unit_forget_bias=unit_forget_bias,
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         recurrent_regularizer=getters.get_regularizer(
             recurrent_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         recurrent_constraint=getters.get_constraint(recurrent_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         dropout=dropout,
         recurrent_dropout=recurrent_dropout,
         **kwargs)
Example #11
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2-D, the input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        assert len(input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer, scale=self.scale, collect=True)
        initializer = getters.get_initializer(self.weights_init)
        self._w = variable(name='w', shape=[n_inputs, self.num_units], regularizer=regularizer,
                           initializer=initializer, trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        self._b = variable(name='b', shape=[self.num_units],
                           initializer=getters.get_initializer(self.bias_init),
                           trainable=self.trainable, restore=self.restore)
        track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        # Weight and bias for the transform gate
        self._w_t = variable(name='w_t', shape=[n_inputs, self.num_units],
                             regularizer=None, initializer=initializer,
                             trainable=self.trainable, restore=self.restore)
        track(self._w_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        self._b_t = variable(name='b_t', shape=[self.num_units],
                             initializer=tf.constant_initializer(-1),
                             trainable=self.trainable, restore=self.restore)
        track(self._b_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(tensor=incoming, shape=[-1, n_inputs])

        H = getters.get_activation(self.activation)(tf.matmul(a=incoming, b=self._w) + self._b)
        T = tf.sigmoid(tf.matmul(a=incoming, b=self._w_t) + self._b_t)
        if self._transform_dropout:
            T = self._transform_dropout(T)
        C = tf.subtract(x=1.0, y=T)
        inference = tf.add(x=tf.multiply(x=H, y=T), y=tf.multiply(x=incoming, y=C))
        track(inference, tf.GraphKeys.ACTIVATIONS)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
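
This is the standard highway layer: with \phi = activation and \sigma the sigmoid, the transform gate T blends the transformed signal H with the raw input,

    H = \phi(x W + b), \qquad T = \sigma(x W_T + b_T)
    y = H \odot T + x \odot (1 - T)

Initializing b_T to -1 pushes T toward 0 early in training, so the layer initially carries its input through almost unchanged.
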
Example #12
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              depth_multiplier=1,
              activation=None,
              use_bias=True,
              depthwise_initializer='glorot_uniform',
              pointwise_initializer='glorot_uniform',
              bias_initializer='zeros',
              depthwise_regularizer=None,
              pointwise_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              depthwise_constraint=None,
              pointwise_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(SeparableConv2D, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         depth_multiplier=depth_multiplier,
         activation=getters.get_activation(activation),
         use_bias=use_bias,
         depthwise_initializer=getters.get_initializer(
             depthwise_initializer),
         pointwise_initializer=getters.get_initializer(
             pointwise_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         depthwise_regularizer=getters.get_regularizer(
             depthwise_regularizer),
         pointwise_regularizer=getters.get_regularizer(
             pointwise_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         depthwise_constraint=getters.get_constraint(depthwise_constraint),
         pointwise_constraint=getters.get_constraint(pointwise_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)
Example #13
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2-D, the input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        incoming = validate_dtype(incoming)

        assert len(input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer, scale=self.scale, collect=True)
        self._w = variable(
            name='w', shape=[n_inputs, self.num_units], dtype=incoming.dtype, regularizer=regularizer,
            initializer=getters.get_initializer(self.weights_init), trainable=self.trainable,
            restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(tensor=inference, shape=[-1, n_inputs])
        inference = tf.matmul(a=inference, b=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b', shape=[self.num_units], dtype=incoming.dtype,
                               initializer=getters.get_initializer(self.bias_init),
                               trainable=self.trainable, restore=self.restore)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
            inference = tf.nn.bias_add(value=inference, bias=self._b)

        if self.activation:
            inference = getters.get_activation(self.activation, collect=True)(inference)

        if self._dropout:
            inference = self._dropout(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #14
File: core.py  Project: vdt/polyaxon
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: 1-D Tensor [samples]. If not 1-D, the input will be flattened.

        Returns:
            1-D Tensor [samples].
        """
        input_shape = get_shape(incoming)
        n_inputs = int(np.prod(a=input_shape[1:]))

        initializer = tf.constant_initializer(value=np.random.randn())
        self._w = variable(name='w',
                           shape=[n_inputs],
                           dtype=incoming.dtype,
                           initializer=initializer,
                           trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(tensor=inference, shape=[-1])
        inference = tf.multiply(x=inference, y=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b',
                               shape=[n_inputs],
                               dtype=incoming.dtype,
                               initializer=initializer,
                               trainable=self.trainable,
                               restore=self.restore)
            inference = tf.add(inference, self._b)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        if self.activation:
            inference = getters.get_activation(self.activation,
                                               collect=True)(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #15
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: 1-D Tensor [samples]. If not 1-D, the input will be flattened.

        Returns:
            1-D Tensor [samples].
        """
        input_shape = get_shape(incoming)
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        initializer = tf.constant_initializer(value=np.random.randn())
        self._w = variable(name='w', shape=[n_inputs],
                           dtype=incoming.dtype, initializer=initializer,
                           trainable=self.trainable, restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 1:
            inference = tf.reshape(tensor=inference, shape=[-1])
        inference = tf.multiply(x=inference, y=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b', shape=[n_inputs],
                               dtype=incoming.dtype, initializer=initializer,
                               trainable=self.trainable, restore=self.restore)
            inference = tf.add(inference, self._b)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        if self.activation:
            inference = getters.get_activation(self.activation, collect=True)(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
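
Both single-unit examples above flatten the input to a vector and apply a per-element weight, an optional bias and an optional activation:

    y = \phi(x \odot w + b)

with w and b initialized from the same random constant drawn once at build time.
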
Example #16
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Dense, self).__init__(
         units,
         activation=getters.get_activation(activation) if activation else activation,
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)