Example #1
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Conv2D, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=getters.get_activation(activation) if activation else activation,
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)
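The wrapper above only resolves string identifiers (activation, initializers, regularizers, constraints) through the `getters` module before delegating to the parent layer. A minimal usage sketch, assuming `getters` accepts the standard names shown; the argument values are illustrative, not taken from the project:

# Hypothetical usage of the Conv2D wrapper above: string names are resolved
# to callables via getters before the parent layer receives them.
conv = Conv2D(filters=32,
              kernel_size=(3, 3),
              activation='relu',                    # -> getters.get_activation('relu')
              kernel_initializer='glorot_uniform',  # -> getters.get_initializer(...)
              kernel_regularizer=None)              # None passes through unchanged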
Example #2
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1),
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              unit_forget_bias=True,
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              return_sequences=False,
              go_backwards=False,
              stateful=False,
              dropout=0.,
              recurrent_dropout=0.,
              **kwargs):
     super(ConvLSTM2D, self).__init__(
         filters,
         kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=getters.get_activation(activation),
         recurrent_activation=getters.get_activation(recurrent_activation),
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         recurrent_initializer=getters.get_initializer(
             recurrent_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         unit_forget_bias=unit_forget_bias,
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         recurrent_regularizer=getters.get_regularizer(
             recurrent_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
          kernel_constraint=getters.get_constraint(kernel_constraint),
          recurrent_constraint=getters.get_constraint(recurrent_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         return_sequences=return_sequences,
         go_backwards=go_backwards,
         stateful=stateful,
         dropout=dropout,
         recurrent_dropout=recurrent_dropout,
         **kwargs)
Example #3
File: core.py Project: chandu088/p
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        incoming = validate_dtype(incoming)

        assert len(input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer,
                                              scale=self.scale,
                                              collect=True)
        self._w = variable(name='w',
                           shape=[n_inputs, self.num_units],
                           dtype=incoming.dtype,
                           regularizer=regularizer,
                           initializer=getters.get_initializer(
                               self.weights_init),
                           trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(tensor=inference, shape=[-1, n_inputs])
        inference = tf.matmul(a=inference, b=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b',
                               shape=[self.num_units],
                               dtype=incoming.dtype,
                               initializer=getters.get_initializer(
                                   self.bias_init),
                               trainable=self.trainable,
                               restore=self.restore)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
            inference = tf.nn.bias_add(value=inference, bias=self._b)

        if self.activation:
            inference = getters.get_activation(self.activation,
                                               collect=True)(inference)

        if self._dropout:
            inference = self._dropout(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #4
File: core.py Project: ysheng312/polyaxon
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        assert len(input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer, scale=self.scale, collect=True)
        initializer = getters.get_initializer(self.weights_init)
        self._w = variable(name='w', shape=[n_inputs, self.num_units], regularizer=regularizer,
                           initializer=initializer, trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        self._b = variable(name='b', shape=[self.num_units],
                           initializer=getters.get_initializer(self.bias_init),
                           trainable=self.trainable, restore=self.restore)
        track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        # Weight and bias for the transform gate
        self._w_t = variable(name='w_t', shape=[n_inputs, self.num_units],
                             regularizer=None, initializer=initializer,
                             trainable=self.trainable, restore=self.restore)
        track(self._w_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        self._b_t = variable(name='b_t', shape=[self.num_units],
                             initializer=tf.constant_initializer(-1),
                             trainable=self.trainable, restore=self.restore)
        track(self._b_t, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            incoming = tf.reshape(tensor=incoming, shape=[-1, n_inputs])

        H = getters.get_activation(self.activation)(tf.matmul(a=incoming, b=self._w) + self._b)
        T = tf.sigmoid(tf.matmul(a=incoming, b=self._w_t) + self._b_t)
        if self._transform_dropout:
            T = self._transform_dropout(T)
        C = tf.subtract(x=1.0, y=T)
        inference = tf.add(x=tf.multiply(x=H, y=T), y=tf.multiply(x=incoming, y=C))
        track(inference, tf.GraphKeys.ACTIVATIONS)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #5
File: utils.py Project: chandu088/p
def create_global_counter(collection, name, graph=None):
    """Create global counter tensor in graph.

    Args:
        collection: the counter's collection.
        name: the counter's name.
        graph: The graph in which to create the global counter tensor. If missing,
        use default graph.

    Returns:
        Global counter tensor.

    Raises:
        ValueError: if global counter tensor is already defined.
    """
    graph = graph or tf.get_default_graph()
    if get_global_counter(collection, name, graph) is not None:
        raise ValueError("`{}` already exists.".format(collection))
    # Create in proper graph and base name_scope.
    with graph.as_default() as g, g.name_scope(None):
        return variable(
            collection,
            shape=[],
            dtype=tf.int64,
            initializer=getters.get_initializer('zeros', dtype=tf.int64),
            trainable=False,
            collections=[tf.GraphKeys.GLOBAL_VARIABLES, collection])
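A short usage sketch for the helper above, relying on the companion `get_global_counter` referenced in its body; the collection name is illustrative:

# Hypothetical usage: create the counter once; a second call for the same
# collection raises ValueError because get_global_counter already finds it.
graph = tf.Graph()
step = create_global_counter(collection=tf.GraphKeys.GLOBAL_STEP,
                             name='global_step',
                             graph=graph)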
Example #6
def create_global_counter(collection, name, graph=None):
    """Create global counter tensor in graph.

    Args:
        collection: the counter's collection.
        name: the counter's name.
        graph: The graph in which to create the global counter tensor. If missing,
        use default graph.

    Returns:
        Global counter tensor.

    Raises:
        ValueError: if global counter tensor is already defined.
    """
    graph = graph or tf.get_default_graph()
    if get_global_counter(collection, name, graph) is not None:
        raise ValueError("`{}` already exists.".format(collection))
    # Create in proper graph and base name_scope.
    with graph.as_default() as g, g.name_scope(None):
        return variable(
            collection,
            shape=[],
            dtype=tf.int64,
            initializer=getters.get_initializer('zeros', dtype=tf.int64),
            trainable=False,
            collections=[tf.GraphKeys.GLOBAL_VARIABLES, collection])
Example #7
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: 2-D Tensor [samples, ids].

        Returns:
            3-D Tensor [samples, embedded_ids, features].
        """
        input_shape = get_shape(incoming)
        assert len(input_shape) == 2, 'Incoming Tensor shape must be 2-D'

        weights_init = getters.get_initializer(self.weights_init)

        self._w = variable('w',
                           shape=[self.input_dim, self.output_dim],
                           initializer=weights_init,
                           trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
        inference = tf.cast(x=incoming, dtype=tf.int32)
        inference = tf.nn.embedding_lookup(
            params=self._w,
            ids=inference,
            validate_indices=self.validate_indices)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #8
    def _build(self, incoming, state, *args, **kwargs):
        """Gated recurrent unit (GRU) with nunits cells."""
        with get_variable_scope('Gates'):  # Reset gate and update gate.
            weights_init = getters.get_initializer(self.weights_init)
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(axis=1,
                                   num_or_size_splits=2,
                                   value=_linear([incoming, state],
                                                 2 * self._num_units, True,
                                                 1.0, weights_init,
                                                 self.trainable, self.restore))
            inner_activation = getters.get_activation(self.inner_activation)
            r, u = inner_activation(r), inner_activation(u)
        with get_variable_scope('Candidate'):
            activation = getters.get_activation(self.activation)
            c = activation(
                _linear([incoming, r * state], self._num_units, True, 0.,
                        weights_init, self.trainable, self.restore))
        new_h = u * state + (1 - u) * c

        self._w, self._b = list(), list()
        # Retrieve RNN Variables
        with get_variable_scope(scope='Gates/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
        with get_variable_scope(scope='Candidate/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))

        return new_h, new_h
Example #9
 def __init__(self,
              mode,
              num_units,
              forget_bias=1.0,
              state_is_tuple=True,
              activation='tanh',
              inner_activation='sigmoid',
              bias=True,
              weights_init=None,
              batch_norm=False,
              trainable=True,
              restore=True,
              name='BasicLSTMCell'):
     super(BasicLSTMCell, self).__init__(mode, name)
     if not state_is_tuple:
         logging.warning(
             '{}: Using a concatenated state is slower and will soon be '
             'deprecated.  Use state_is_tuple=True.'.format(self))
     self._num_units = num_units
     self._forget_bias = forget_bias
     self._state_is_tuple = state_is_tuple
     self.batch_norm = batch_norm
     self.activation = activation
     self.inner_activation = inner_activation
     self.bias = bias
     self.weights_init = getters.get_initializer(weights_init)
     self.trainable = trainable
     self.restore = restore
Example #10
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: 2-D Tensor [samples, ids].

        Returns:
            3-D Tensor [samples, embedded_ids, features].
        """
        input_shape = get_shape(incoming)
        assert len(input_shape) == 2, 'Incoming Tensor shape must be 2-D'

        weights_init = getters.get_initializer(self.weights_init)

        self._w = variable('w',
                           shape=[self.input_dim, self.output_dim],
                           initializer=weights_init,
                           trainable=self.trainable,
                           restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
        inference = tf.cast(x=incoming, dtype=tf.int32)
        inference = tf.nn.embedding_lookup(
            params=self._w,
            ids=inference,
            validate_indices=self.validate_indices)

        # Embedding doesn't support masking, so we save sequence length prior to the lookup.
        # Expand dim to 3d.
        shape = [-1] + inference.get_shape().as_list()[1:3] + [1]
        inference.seq_length = retrieve_seq_length_op(
            tf.reshape(incoming, shape))
        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #11
    def _build(self, incoming, state, *args, **kwargs):
        """Gated recurrent unit (GRU) with nunits cells."""
        with get_variable_scope('Gates'):  # Reset gate and update gate.
            weights_init = getters.get_initializer(self.weights_init)
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(
                axis=1, num_or_size_splits=2,
                value=_linear([incoming, state], 2 * self._num_units, True, 1.0,
                              weights_init, self.trainable, self.restore))
            inner_activation = getters.get_activation(self.inner_activation)
            r, u = inner_activation(r), inner_activation(u)
        with get_variable_scope('Candidate'):
            activation = getters.get_activation(self.activation)
            c = activation(
                _linear([incoming, r * state], self._num_units, True, 0.,
                        weights_init, self.trainable, self.restore))
        new_h = u * state + (1 - u) * c

        self._w, self._b = list(), list()
        # Retrieve RNN Variables
        with get_variable_scope(scope='Gates/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))
        with get_variable_scope(scope='Candidate/Linear', reuse=True):
            self._w.append(tf.get_variable('w'))
            self._b.append(tf.get_variable('b'))

        return new_h, new_h
Example #12
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: (2+)-D Tensor [samples, input dim]. If not 2D, input will be flattened.

        Returns:
            2D Tensor [samples, num_units].
        """
        self._declare_dependencies()
        input_shape = get_shape(incoming)
        incoming = validate_dtype(incoming)

        assert len(input_shape) > 1, 'Incoming Tensor shape must be at least 2-D'
        n_inputs = total_tensor_depth(tensor_shape=input_shape)

        regularizer = getters.get_regularizer(self.regularizer, scale=self.scale, collect=True)
        self._w = variable(
            name='w', shape=[n_inputs, self.num_units], dtype=incoming.dtype, regularizer=regularizer,
            initializer=getters.get_initializer(self.weights_init), trainable=self.trainable,
            restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)

        inference = incoming
        # If input is not 2d, flatten it.
        if len(input_shape) > 2:
            inference = tf.reshape(tensor=inference, shape=[-1, n_inputs])
        inference = tf.matmul(a=inference, b=self._w)

        self._b = None
        if self.bias:
            self._b = variable(name='b', shape=[self.num_units], dtype=incoming.dtype,
                               initializer=getters.get_initializer(self.bias_init),
                               trainable=self.trainable, restore=self.restore)
            track(self._b, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
            inference = tf.nn.bias_add(value=inference, bias=self._b)

        if self.activation:
            inference = getters.get_activation(self.activation, collect=True)(inference)

        if self._dropout:
            inference = self._dropout(inference)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference
Example #13
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format=None,
              depth_multiplier=1,
              activation=None,
              use_bias=True,
              depthwise_initializer='glorot_uniform',
              pointwise_initializer='glorot_uniform',
              bias_initializer='zeros',
              depthwise_regularizer=None,
              pointwise_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              depthwise_constraint=None,
              pointwise_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(SeparableConv2D, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         depth_multiplier=depth_multiplier,
         activation=getters.get_activation(activation),
         use_bias=use_bias,
         depthwise_initializer=getters.get_initializer(
             depthwise_initializer),
         pointwise_initializer=getters.get_initializer(
             pointwise_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         depthwise_regularizer=getters.get_regularizer(
             depthwise_regularizer),
         pointwise_regularizer=getters.get_regularizer(
             pointwise_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         depthwise_constraint=getters.get_constraint(depthwise_constraint),
         pointwise_constraint=getters.get_constraint(pointwise_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)
Example #14
    def _build(self, inputs, state, *args, **kwargs):
        """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
        weights_init = getters.get_initializer(self.weights_init)
        output = getters.get_activation(self.activation)(
            _linear([inputs, state], self.num_units, True, 0.,
                    weights_init, self.trainable, self.restore))
        # Retrieve RNN Variables
        with get_variable_scope(name='Linear', reuse=True):
            self._w = tf.get_variable(name='w')
            self._b = tf.get_variable(name='b')

        return output, output
Example #15
    def _build(self, inputs, state, *args, **kwargs):
        """Most basic RNN: output = new_state = activation(W * input + U * state + B)."""
        weights_init = getters.get_initializer(self.weights_init)
        output = getters.get_activation(self.activation)(
            _linear([inputs, state], self.num_units, True, 0., weights_init,
                    self.trainable, self.restore))
        # Retrieve RNN Variables
        with get_variable_scope(name='Linear', reuse=True):
            self._w = tf.get_variable(name='w')
            self._b = tf.get_variable(name='b')

        return output, output
Example #16
 def __init__(self,
              units,
              activation='tanh',
              recurrent_activation='hard_sigmoid',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              recurrent_initializer='orthogonal',
              bias_initializer='zeros',
              unit_forget_bias=True,
              kernel_regularizer=None,
              recurrent_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              recurrent_constraint=None,
              bias_constraint=None,
              dropout=0.,
              recurrent_dropout=0.,
              **kwargs):
     super(LSTM, self).__init__(
         units=units,
         activation=getters.get_activation(activation),
         recurrent_activation=getters.get_activation(recurrent_activation),
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         recurrent_initializer=getters.get_initializer(
             recurrent_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         unit_forget_bias=unit_forget_bias,
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         recurrent_regularizer=getters.get_regularizer(
             recurrent_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         recurrent_constraint=getters.get_constraint(recurrent_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         dropout=dropout,
         recurrent_dropout=recurrent_dropout,
         **kwargs)
Example #17
    def _prelu(x, name):
        with get_name_scope(name):
            if channel_shared:
                w_shape = (1,)
            else:
                w_shape = get_shape(x)[-1:]

            W_init = getters.get_initializer(weights_init)
            alphas = variable(shape=w_shape, initializer=W_init, restore=restore, name="alphas")

            x = tf.nn.relu(features=x) + tf.multiply(x=alphas, y=(x - tf.abs(x))) * 0.5
            x.alphas = alphas
            return x
Example #18
    def _prelu(x, name):
        with get_name_scope(name):
            if channel_shared:
                w_shape = (1,)
            else:
                w_shape = get_shape(x)[-1:]

            w_init = getters.get_initializer(weights_init)
            alphas = variable(shape=w_shape, initializer=w_init, restore=restore, name="alphas")

            x = tf.nn.relu(features=x) + tf.multiply(x=alphas, y=(x - tf.abs(x))) * 0.5
            x.alphas = alphas
            return x
Example #19
def variable(name,
             shape=None,
             dtype=tf.float32,
             initializer=None,
             regularizer=None,
             trainable=True,
             collections=None,
             device='',
             restore=True):
    """Instantiate a new variable.

    Args:
        name: `str`. A name for this variable.
        shape: list of `int`. The variable shape (optional).
        dtype: `type`. The variable data type.
        initializer: `str` or `Tensor`. The variable initialization.
        regularizer: `str` or `Tensor`. The variable regularizer.
        trainable: `bool`. If True, this variable's weights will be trained.
        collections: `str`. A collection to add the new variable to (optional).
        device: `str`. Device on which to store the variable, e.g. '/cpu:0'.
        restore: `bool`. Whether to restore this variable when loading a pre-trained model.

    Returns:
        A Variable.
    """

    if isinstance(initializer, six.string_types):
        initializer = getters.get_initializer(initializer)
    # Remove shape param if initializer is a Tensor
    if not callable(initializer) and isinstance(initializer, tf.Tensor):
        shape = None

    if isinstance(regularizer, six.string_types):
        regularizer = getters.get_regularizer(regularizer)

    with tf.device(device_name_or_function=device):
        var = tf.get_variable(name=name,
                              shape=shape,
                              dtype=dtype,
                              initializer=initializer,
                              regularizer=regularizer,
                              trainable=trainable,
                              collections=collections)

        if not restore:
            # TODO adapt restoring saver
            tf.add_to_collection(name=tf.GraphKeys.EXCL_RESTORE_VARIABLES,
                                 value=var)

        return var
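A minimal sketch of calling this helper with string arguments; the regularizer name is an assumption about what `getters.get_regularizer` accepts:

# Hypothetical usage: strings are resolved through getters, a Tensor initializer
# is used as-is (its shape argument dropped), and restore=False adds the
# variable to the EXCL_RESTORE_VARIABLES collection.
w = variable(name='w',
             shape=[128, 64],
             initializer='glorot_uniform',
             regularizer='l2_regularizer',  # assumed name
             trainable=True,
             restore=False)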
Example #20
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Dense, self).__init__(
         units,
         activation=getters.get_activation(activation) if activation else activation,
         use_bias=use_bias,
         kernel_initializer=getters.get_initializer(kernel_initializer),
         bias_initializer=getters.get_initializer(bias_initializer),
         kernel_regularizer=getters.get_regularizer(kernel_regularizer),
         bias_regularizer=getters.get_regularizer(bias_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         kernel_constraint=getters.get_constraint(kernel_constraint),
         bias_constraint=getters.get_constraint(bias_constraint),
         **kwargs)
Example #21
 def __init__(self, mode, num_units, forget_bias=1.0, state_is_tuple=True, activation='tanh',
              inner_activation='sigmoid', bias=True, weights_init=None,
              batch_norm=False, trainable=True, restore=True, name='BasicLSTMCell'):
     super(BasicLSTMCell, self).__init__(mode, name)
     if not state_is_tuple:
         logging.warning(
             '{}: Using a concatenated state is slower and will soon be '
             'deprecated.  Use state_is_tuple=True.'.format(self))
     self._num_units = num_units
     self._forget_bias = forget_bias
     self._state_is_tuple = state_is_tuple
     self.batch_norm = batch_norm
     self.activation = activation
     self.inner_activation = inner_activation
     self.bias = bias
     self.weights_init = getters.get_initializer(weights_init)
     self.trainable = trainable
     self.restore = restore
Example #22
 def __init__(self,
              input_dim,
              output_dim,
              embeddings_initializer='uniform',
              embeddings_regularizer=None,
              activity_regularizer=None,
              embeddings_constraint=None,
              mask_zero=False,
              input_length=None,
              **kwargs):
     super(Embedding, self).__init__(
         input_dim=input_dim,
         output_dim=output_dim,
         embeddings_initializer=getters.get_initializer(embeddings_initializer),
         embeddings_regularizer=getters.get_regularizer(embeddings_regularizer),
         activity_regularizer=getters.get_regularizer(activity_regularizer),
         embeddings_constraint=getters.get_constraint(embeddings_constraint),
         mask_zero=mask_zero,
         input_length=input_length,
         **kwargs)
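As with the other layer wrappers, a hedged instantiation sketch; vocabulary size and embedding dimension below are illustrative values:

# Hypothetical usage of the Embedding wrapper above.
embedding = Embedding(input_dim=10000,   # vocabulary size
                      output_dim=128,    # embedding dimension
                      embeddings_initializer='uniform',
                      mask_zero=False)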
Example #23
def variable(name, shape=None, dtype=tf.float32, initializer=None, regularizer=None,
             trainable=True, collections=None, device='', restore=True):
    """Instantiate a new variable.

    Args:
        name: `str`. A name for this variable.
        shape: list of `int`. The variable shape (optional).
        dtype: `type`. The variable data type.
        initializer: `str` or `Tensor`. The variable initialization.
        regularizer: `str` or `Tensor`. The variable regularizer.
        trainable: `bool`. If True, this variable's weights will be trained.
        collections: `str`. A collection to add the new variable to (optional).
        device: `str`. Device on which to store the variable, e.g. '/cpu:0'.
        restore: `bool`. Whether to restore this variable when loading a pre-trained model.

    Returns:
        A Variable.
    """

    if isinstance(initializer, six.string_types):
        initializer = getters.get_initializer(initializer)
    # Remove shape param if initializer is a Tensor
    if not callable(initializer) and isinstance(initializer, tf.Tensor):
        shape = None

    if isinstance(regularizer, six.string_types):
        regularizer = getters.get_regularizer(regularizer)

    with tf.device(device_name_or_function=device):
        var = tf.get_variable(name=name,
                              shape=shape,
                              dtype=dtype,
                              initializer=initializer,
                              regularizer=regularizer,
                              trainable=trainable,
                              collections=collections)

        if not restore:
            tf.add_to_collection(name=tf.GraphKeys.EXCL_RESTORE_VARIABLES, value=var)  # @TODO adapt restoring saver

        return var
Example #24
    def _build(self, incoming, *args, **kwargs):
        """
        Args:
            incoming: 2-D Tensor [samples, ids].

        Returns:
            3-D Tensor [samples, embedded_ids, features].
        """
        input_shape = get_shape(incoming)
        assert len(input_shape) == 2, 'Incoming Tensor shape must be 2-D'

        weights_init = getters.get_initializer(self.weights_init)

        self._w = variable('w', shape=[self.input_dim, self.output_dim],
                           initializer=weights_init,
                           trainable=self.trainable, restore=self.restore)
        track(self._w, tf.GraphKeys.LAYER_VARIABLES, self.module_name)
        inference = tf.cast(x=incoming, dtype=tf.int32)
        inference = tf.nn.embedding_lookup(params=self._w, ids=inference,
                                           validate_indices=self.validate_indices)

        track(inference, tf.GraphKeys.LAYER_TENSOR, self.module_name)
        return inference