Example #1
  def __init__(self,
               units,
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
    self.units = units
    cell_spec = collections.namedtuple('cell', ['state_size', 'output_size'])
    self.cell = cell_spec(
        state_size=(self.units, self.units), output_size=self.units)
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    self._num_constants = None
    self._num_inputs = None
    self._states = None
    self.input_spec = [InputSpec(ndim=3)]
    self.state_spec = [
        InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
    ]
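Not part of the example above: a minimal sketch of what `activations.get` (the common thread in these examples) does, assuming the tf.keras implementation. It resolves a string identifier to the corresponding activation callable, passes callables through, and maps `None` to the linear (pass-through) activation.

from tensorflow.keras import activations
import numpy as np

act = activations.get('tanh')            # string name resolved to a callable
print(act(np.array([-1.0, 0.0, 1.0])))   # tanh applied elementwise
print(activations.get(None))             # the linear (identity) activation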
Example #2
  def __init__(self,
               filters,
               kernel_size,
               strides=(1, 1),
               padding='valid',
               data_format=None,
               dilation_rate=(1, 1),
               activation='tanh',
               recurrent_activation='hard_sigmoid',
               use_bias=True,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               dropout=0.,
               recurrent_dropout=0.,
               **kwargs):
    super(ConvLSTM2DCell, self).__init__(**kwargs)
    self.filters = filters
    self.kernel_size = conv_utils.normalize_tuple(kernel_size, 2, 'kernel_size')
    self.strides = conv_utils.normalize_tuple(strides, 2, 'strides')
    self.padding = conv_utils.normalize_padding(padding)
    self.data_format = conv_utils.normalize_data_format(data_format)
    self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, 2,
                                                    'dilation_rate')
    self.activation = activations.get(activation)
    self.recurrent_activation = activations.get(recurrent_activation)
    self.use_bias = use_bias

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.dropout = min(1., max(0., dropout))
    self.recurrent_dropout = min(1., max(0., recurrent_dropout))
    self.state_size = (self.filters, self.filters)
    self._dropout_mask = None
    self._recurrent_dropout_mask = None
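The `conv_utils.normalize_tuple` calls above expand scalar arguments such as `kernel_size=3` into per-dimension tuples. A hedged re-implementation of the essential behaviour (the real helper lives in Keras' `conv_utils` and performs additional type validation):

def normalize_tuple_sketch(value, n, name):
    # Assumption: mirrors the essential behaviour of conv_utils.normalize_tuple.
    if isinstance(value, int):
        return (value,) * n
    value = tuple(value)
    if len(value) != n:
        raise ValueError(f'`{name}` must be an int or a tuple of {n} ints, got {value}')
    return value

print(normalize_tuple_sketch(3, 2, 'kernel_size'))       # (3, 3)
print(normalize_tuple_sketch((3, 5), 2, 'kernel_size'))  # (3, 5)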
Example #3
  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)
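Because the constructor routes `activation` through `activations.get`, a `Dense` layer accepts either a string or a callable. A small usage sketch with the public `tf.keras` API:

import tensorflow as tf

layer_a = tf.keras.layers.Dense(8, activation='relu')      # string, resolved by activations.get
layer_b = tf.keras.layers.Dense(8, activation=tf.nn.relu)  # callable, passed through unchanged
print(layer_a(tf.zeros((2, 4))).shape)  # (2, 8)
print(layer_b(tf.zeros((2, 4))).shape)  # (2, 8)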
Example #4
 def __init__(self,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(LocallyConnected1D, self).__init__(**kwargs)
   self.filters = filters
   self.kernel_size = conv_utils.normalize_tuple(kernel_size, 1, 'kernel_size')
   self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
   self.padding = conv_utils.normalize_padding(padding)
   if self.padding != 'valid':
     raise ValueError('Invalid border mode for LocallyConnected1D '
                      '(only "valid" is supported): ' + padding)
   self.data_format = conv_utils.normalize_data_format(data_format)
   self.activation = activations.get(activation)
   self.use_bias = use_bias
   self.kernel_initializer = initializers.get(kernel_initializer)
   self.bias_initializer = initializers.get(bias_initializer)
   self.kernel_regularizer = regularizers.get(kernel_regularizer)
   self.bias_regularizer = regularizers.get(bias_regularizer)
   self.activity_regularizer = regularizers.get(activity_regularizer)
   self.kernel_constraint = constraints.get(kernel_constraint)
   self.bias_constraint = constraints.get(bias_constraint)
   self.input_spec = InputSpec(ndim=3)
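A short usage sketch of this layer via the public API, assuming a TF2 / Keras 2 environment where `tf.keras.layers.LocallyConnected1D` is available. Note that only `padding='valid'` is accepted, as enforced in the constructor above.

import tensorflow as tf

layer = tf.keras.layers.LocallyConnected1D(filters=4, kernel_size=3, activation='relu')
y = layer(tf.zeros((2, 10, 8)))  # (batch, steps, channels)
print(y.shape)  # (2, 8, 4): 'valid' padding shortens the step dimension by kernel_size - 1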
Example #5
  def step(cell_inputs, cell_states):
    h_tm1 = cell_states[0]  # previous memory state
    c_tm1 = cell_states[1]  # previous carry state

    # Only use the second half of the bias weights.
    _, real_bias = array_ops.split(bias, 2)

    z = K.dot(cell_inputs, kernel)
    z += K.dot(h_tm1, recurrent_kernel)
    z = K.bias_add(z, real_bias)

    z0 = z[:, :units]
    z1 = z[:, units:2 * units]
    z2 = z[:, 2 * units:3 * units]
    z3 = z[:, 3 * units:]

    i = activations.get('hard_sigmoid')(z0)
    f = activations.get('hard_sigmoid')(z1)
    c = f * c_tm1 + i * activations.get('tanh')(z2)
    o = activations.get('hard_sigmoid')(z3)

    h = o * activations.get('tanh')(c)
    return h, [h, c]
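The gate maths above can be restated in plain NumPy to make the four-way split explicit. A hedged sketch (shapes and the hard_sigmoid approximation are illustrative, not the exact Keras kernels):

import numpy as np

def hard_sigmoid(x):
    # Keras-style piecewise-linear approximation of the sigmoid.
    return np.clip(0.2 * x + 0.5, 0.0, 1.0)

units = 3
rng = np.random.default_rng(0)
x = rng.normal(size=(1, 2))
h_tm1 = rng.normal(size=(1, units))          # previous memory state
c_tm1 = rng.normal(size=(1, units))          # previous carry state
kernel = rng.normal(size=(2, 4 * units))
recurrent_kernel = rng.normal(size=(units, 4 * units))
bias = rng.normal(size=(4 * units,))

z = x @ kernel + h_tm1 @ recurrent_kernel + bias
z0, z1, z2, z3 = np.split(z, 4, axis=-1)     # input, forget, candidate, output slices
i, f, o = hard_sigmoid(z0), hard_sigmoid(z1), hard_sigmoid(z3)
c = f * c_tm1 + i * np.tanh(z2)
h = o * np.tanh(c)
print(h.shape, c.shape)  # (1, 3) (1, 3)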
Example #6
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    """Initializes the parameters for an RNN cell.

    Args:
      num_units: int, The number of units in the RNN cell.
      activation: Nonlinearity to use.  Default: `tanh`. It could also be a string
        that is within the Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. Raises an error if not `True` and the existing scope
        already has the given variables.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().

    Raises:
      ValueError: If the existing scope already has the given variables.
    """
    super(TfLiteRNNCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)

    # Inputs must be Rank-2.
    self.input_spec = base_layer.InputSpec(ndim=2)

    self._tflite_wrapper = op_hint.OpHint("UnidirectionalSequenceRnn")
    self._num_units = num_units
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = math_ops.tanh
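The `if activation:` branch above is a common fallback pattern: resolve the identifier when one is given, otherwise default to tanh. A hedged sketch of the same idea outside the class, with `tf.math.tanh` standing in for `math_ops.tanh`:

import tensorflow as tf
from tensorflow.keras import activations

def resolve_activation(activation=None):
    # Assumption: mirrors the fallback above.
    return activations.get(activation) if activation else tf.math.tanh

print(resolve_activation())        # falls back to tanh
print(resolve_activation('relu'))  # resolved from the string name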
Example #7
    def __init__(self,
                 rnn_seq_length,
                 activation='relu',
                 kernel_initializer='random_uniform',
                 merge_mode='concat',
                 output_conv_filter=None,
                 **kwargs):
        """
        Class for a Spatial RNN layer capable of learning spatial connections between the pixels of a 2D image in an
        RNN fashion along all four directions: up, down, left and right. Implemented in TensorFlow 2.0 with the Keras
        API. The RNN unit is a plain RNN with a ReLU activation function (default), as suggested by Li et al. (2019).
        The activation function can be chosen from the activation functions available in the tensorflow.python.keras
        library. The spatial connections are analysed in all principal directions, sweeping right, left, down and up.
        The result of the spatial RNN analysis in each principal direction has exactly the same shape as the input,
        and the results can be concatenated or merged together depending on the "merge_mode" input parameter.
        The "merge_mode" input parameter can be set to either 'concat' (default) or 'convolution'.
        By default, the results for all principal directions are concatenated, resulting in a final output shape with
        4 times as many channels as the input. In the 'convolution' merge mode, the results for all principal
        directions are concatenated and the number of channels is then converted using a 1*1 2D convolution layer.
        The output number of channels is determined by the 'output_conv_filter' input parameter, which by default is
        set to the input's number of channels.

        The input 2D image is recommended to be square, as sufficient testing with non-square input images has not been
        done. When using this layer as the first layer, precede it with a Keras "Input" layer. It should be used with
        `data_format="channels_last"`. The kernel initializer and activation functions can be set using the ones
        available in tensorflow.python.keras.initializers & tensorflow.python.keras.activations.

        Examples:
        The inputs are 5x5 RGB images with `channels_last` and a batch size of 2:
        input_shape = (2, 5, 5, 3)  # (batch, height, width, channels)
        x_in = tf.keras.layers.Input((5, 5, 3))
        spatial_rnn_concat = SpatialRNN2D(rnn_seq_length=4, merge_mode='concat')
        spatial_rnn_merge = SpatialRNN2D(rnn_seq_length=4, merge_mode='convolution')
        y_out = spatial_rnn_concat(x_in)  # output shape of (2, 5, 5, 12)
        y_out = spatial_rnn_merge(x_in)  # output shape of (2, 5, 5, 3)

        :param rnn_seq_length: Integer, the length of the pixel sequence to be analysed by the RNN unit.
        :param activation: (relu) Activation function applied after the spatial RNN and merge convolution layers.
        :param kernel_initializer: (random_uniform) Initializer for the `kernel` weights matrix.
        :param merge_mode: ('concat') Whether to concatenate or merge (by 'convolution') the result of each directional pass.
        :param output_conv_filter: Number of output channels when the 'convolution' merge mode is selected.
        """

        super().__init__(**kwargs)
        self.padding = "same"
        if merge_mode not in ['concat', 'convolution']:
            raise ValueError(
                'Unknown merge mode: the merge mode argument can be either \'concat\' or \'convolution\'.'
            )
        self.merge_mode = merge_mode
        self.output_conv_filter = output_conv_filter
        self.seq_length = rnn_seq_length
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_dic = OrderedDict()
        self.kernel_switch_dic = OrderedDict({
            'right': np.array([[1, 0, 0]]),
            'left': np.array([[0, 0, 1]]),
            'down': np.array([[1], [0], [0]]),
            'up': np.array([[0], [0], [1]])
        })
Example #8
  def __init__(self,
               units,
               activation='tanh',
               init_kern_1='RandomUniform',
               init_kern_2='RandomUniform',
               init_kern_3='RandomUniform',
               init_recur_kern_2='orthogonal',
               init_out_fb='orthogonal',
               init_bias='zeros',
               connectivity=None,
               reg_kern_1=None,
               reg_kern_3=None,
               reg_out_fb=None,
               reg_bias=None,
               constraint_kern_1=None,
               constraint_kern_3=None,
               constraint_recur_kern_2=None,
               constraint_out_fb=None,
               constraint_bias=None,
               in_dropout=None,
               recur_dropout=None,
               train_kern_1=False,
               train_kern_3=False,
               train_out_fb=False,
               use_out_fb=False,
               use_dropout_mask=False,
               use_recur=False,
               use_clock=False,
               clock_rate=None,
               data_format=None,
               **kwargs):
    super(SimpleRNNCell, self).__init__(**kwargs)
    self.units = units

    self.activation = activations.get(activation)

    self.init_kern_1 = initializers.get(init_kern_1)
    self.init_kern_2 = initializers.get(init_kern_2)
    self.init_kern_3 = initializers.get(init_kern_3)
    self.init_recur_kern_2 = initializers.get(init_recur_kern_2)
    self.init_out_fb = initializers.get(init_out_fb)
    self.init_bias = initializers.get(init_bias)

    # Fall back to full connectivity when no valid per-kernel connectivity is given.
    if connectivity is None or len(connectivity) != 3:
      self.connectivity_1 = 1.
      self.connectivity_2 = 1.
      self.connectivity_3 = 1.
    else:
      self.connectivity_1 = min(1., max(0., connectivity[0]))
      self.connectivity_2 = min(1., max(0., connectivity[1]))
      self.connectivity_3 = min(1., max(0., connectivity[2]))
    
    self.reg_kern_1 = regularizers.get(reg_kern_1)
    self.reg_kern_3 = regularizers.get(reg_kern_3)
    self.reg_out_fb = regularizers.get(reg_out_fb)
    self.reg_bias = regularizers.get(reg_bias)
    
    self.constraint_kern_1 = constraints.get(constraint_kern_1)
    self.constraint_kern_3 = constraints.get(constraint_kern_3) 
    self.constraint_recur_kern_2 = constraints.get(constraint_recur_kern_2)
    self.constraint_out_fb = constraints.get(constraint_out_fb)
    self.constraint_bias = constraints.get(constraint_bias)
                                       
    # A value of None is treated as no dropout.
    self.in_dropout = min(1., max(0., in_dropout or 0.))
    self.recur_dropout = min(1., max(0., recur_dropout or 0.))
    
    self.train_kern_1 = train_kern_1
    self.train_kern_3 = train_kern_3

    self.clock = use_clock
    self.clock_rate = clock_rate

    self.in_dropout_mask = None
    self.recur_dropout_mask = None
    self.state_size = self.units
    self.output_size = self.units
    self.data_format = data_format
    self.tf_data_format = self.data_format
    self.clock_kernel = None
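As with activations, `initializers.get` accepts lowercase names, class-style names such as 'RandomUniform', or initializer instances, which is why the constructor above can mix the two naming styles. A quick hedged sketch against the tf.keras API:

from tensorflow.keras import initializers

print(initializers.get('orthogonal'))      # -> an Orthogonal initializer instance
print(initializers.get('RandomUniform'))   # class-style names also resolve
zeros = initializers.Zeros()
print(initializers.get(zeros) is zeros)    # instances pass through unchanged -> True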
Example #9
 def __init__(self, activation, **kwargs):
   super(Activation, self).__init__(**kwargs)
   self.supports_masking = True
   self.activation = activations.get(activation)
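Using this pattern through the public API is a one-liner; a minimal sketch assuming tf.keras:

import tensorflow as tf

act_layer = tf.keras.layers.Activation('relu')
print(act_layer(tf.constant([-1.0, 2.0])))  # [0., 2.]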
Example #10
 def __init__(self, filters, kernel_size, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1),
              groups=1, activation=None, use_bias=True, dtype=DEFAULT_COMPLEX_TYPE,
              kernel_initializer=ComplexGlorotUniform(), bias_initializer=Zeros(),
              kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,
              kernel_constraint=None, bias_constraint=None, **kwargs):
     """
     :param filters: Integer, the dimensionality of the output space (i.e. the number of output filters in the convolution).
     :param kernel_size: An integer or tuple/list of 2 integers, specifying the height
         and width of the 2D convolution window. Can be a single integer to specify
         the same value for all spatial dimensions.
     :param strides: An integer or tuple/list of 2 integers, specifying the strides of
         the convolution along the height and width. Can be a single integer to
         specify the same value for all spatial dimensions. Specifying any stride
         value != 1 is incompatible with specifying any `dilation_rate` value != 1.
     :param padding: one of `"valid"` or `"same"` (case-insensitive).
         `"valid"` means no padding. `"same"` results in padding evenly to
         the left/right or up/down of the input such that output has the same
         height/width dimension as the input.
     :param data_format: A string, one of `channels_last` (default) or `channels_first`.
         The ordering of the dimensions in the inputs. `channels_last` corresponds
         to inputs with shape `(batch_size, height, width, channels)` while
         `channels_first` corresponds to inputs with shape `(batch_size, channels,
         height, width)`. It defaults to the `image_data_format` value found in
         your Keras config file at `~/.keras/keras.json`. If you never set it, then
         it will be `channels_last`.
     :param dilation_rate: an integer or tuple/list of 2 integers, specifying the
         dilation rate to use for dilated convolution. Can be a single integer to
         specify the same value for all spatial dimensions. Currently, specifying
         any `dilation_rate` value != 1 is incompatible with specifying any stride
         value != 1.
     :param groups: A positive integer specifying the number of groups in which the
         input is split along the channel axis. Each group is convolved separately
         with `filters / groups` filters. The output is the concatenation of all
         the `groups` results along the channel axis. Input channels and `filters`
         must both be divisible by `groups`.
     :param activation: Activation function to use. If you don't specify anything, no activation is applied.
         For complex :code:`dtype`, this must be a :code:`cvnn.activations` module.
     :param use_bias: Boolean, whether the layer uses a bias vector.
     :param kernel_initializer: Initializer for the `kernel` weights matrix (see `keras.initializers`).
     :param bias_initializer: Initializer for the bias vector (see `keras.initializers`).
     :param kernel_regularizer: Regularizer function applied to the `kernel` weights matrix (see `keras.regularizers`).
     :param bias_regularizer: Regularizer function applied to the bias vector (see `keras.regularizers`).
     :param activity_regularizer: Regularizer function applied to the output of the layer (its "activation") (see `keras.regularizers`).
     :param kernel_constraint: Constraint function applied to the kernel matrix (see `keras.constraints`).
     :param bias_constraint: Constraint function applied to the bias vector (see `keras.constraints`).
     """
     super(ComplexConv2D, self).__init__(
         rank=2, dtype=dtype,
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         groups=groups,
         activation=activations.get(activation),
         use_bias=use_bias,
         kernel_initializer=initializers.get(kernel_initializer),
         bias_initializer=initializers.get(bias_initializer),
         kernel_regularizer=regularizers.get(kernel_regularizer),
         bias_regularizer=regularizers.get(bias_regularizer),
         activity_regularizer=regularizers.get(activity_regularizer),
         kernel_constraint=constraints.get(kernel_constraint),
         bias_constraint=constraints.get(bias_constraint),
         **kwargs)
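The padding semantics documented above follow the standard Keras convolution rules. A hedged illustration with a plain real-valued Conv2D (the complex variant keeps the same shape behaviour):

import tensorflow as tf

x = tf.zeros((1, 8, 8, 3))
print(tf.keras.layers.Conv2D(4, 3, padding='valid')(x).shape)  # (1, 6, 6, 4)
print(tf.keras.layers.Conv2D(4, 3, padding='same')(x).shape)   # (1, 8, 8, 4)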
Example #11
    def __init__(self,
                 units,
                 relations,
                 heads=1,
                 head_aggregation=HeadAggregation.MEAN,
                 attention_mode=AttentionModes.ARGAT,
                 attention_style=AttentionStyles.SUM,
                 attention_units=1,
                 attn_use_edge_features=False,
                 kernel_basis_size=None,
                 attn_kernel_basis_size=None,
                 activation=None,
                 attn_activation=tf.nn.leaky_relu,
                 use_bias=False,
                 batch_normalisation=False,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 attn_kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 attn_kernel_regularizer=None,
                 activity_regularizer=None,
                 feature_dropout=None,
                 support_dropout=None,
                 edge_feature_dropout=None,
                 name='rgat',
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(RelationalGraphAttention, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            name=name,
            **kwargs)

        self.units = int(units)
        self.relations = int(relations)
        self.heads = int(heads)
        self.head_aggregation = HeadAggregation.validate(head_aggregation)
        self.attention_mode = AttentionModes.validate(attention_mode)
        self.attention_style = AttentionStyles.validate(attention_style)
        self.attention_units = attention_units
        self.attn_use_edge_features = attn_use_edge_features

        self.kernel_basis_size = (int(kernel_basis_size)
                                  if kernel_basis_size else None)
        self.attn_kernel_basis_size = (int(attn_kernel_basis_size)
                                       if attn_kernel_basis_size else None)

        self.activation = activations.get(activation)
        self.attn_activation = activations.get(attn_activation)

        self.use_bias = use_bias
        self.batch_normalisation = batch_normalisation

        if self.batch_normalisation:
            self.batch_normalisation_layer = tf.layers.BatchNormalization()

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.attn_kernel_initializer = initializers.get(
            attn_kernel_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.attn_kernel_regularizer = regularizers.get(
            attn_kernel_regularizer)

        self.feature_dropout = feature_dropout
        self.support_dropout = support_dropout
        self.edge_feature_dropout = edge_feature_dropout

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)

        self.dense_layer = rgat_layers.BasisDecompositionDense(
            units=self.relations * self.heads * self.units,
            basis_size=self.kernel_basis_size,
            coefficients_size=self.relations * self.heads,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            name=name + '_basis_decomposition_dense',
            **kwargs)
        self.attention_logits = RelationalGraphAttentionLogits(
            relations=self.relations,
            heads=self.heads,
            attention_style=self.attention_style,
            attention_units=self.attention_units,
            basis_size=self.attn_kernel_basis_size,
            activation=self.attn_activation,
            use_edge_features=self.attn_use_edge_features,
            kernel_initializer=self.attn_kernel_initializer,
            kernel_regularizer=self.attn_kernel_regularizer,
            feature_dropout=self.feature_dropout,
            edge_feature_dropout=self.edge_feature_dropout,
            batch_normalisation=self.batch_normalisation,
            name="logits",
            **kwargs)
        if self.head_aggregation == HeadAggregation.PROJECTION:
            self.projection_layer = keras_layers.Dense(
                units=self.units,
                use_bias=False,
                kernel_initializer=self.kernel_initializer,
                kernel_regularizer=self.kernel_regularizer,
                name="projection",
                **kwargs)
        if self.batch_normalisation:
            self.batch_normalisation_layer = tf.layers.BatchNormalization()
Example #12
    def __call__(self,
                 input,
                 forward_kernel,
                 backward_kernel,
                 activation,
                 layer_type,
                 bias=None,
                 **kwargs):

        # Add stop gradients
        input = tf.stop_gradient(input)
        if bias is not None:
            bias = tf.stop_gradient(bias)
        if not self.update_forward:
            forward_kernel = tf.stop_gradient(forward_kernel)

        # Use random noise
        if self.input_distribution is not None:
            if self.input_distribution == 'uniform':
                val = np.sqrt(12 * self.input_stddev**2) / 2
                input = tf.random.uniform(tf.shape(input),
                                          minval=-val,
                                          maxval=val)
            elif self.input_distribution == 'normal':
                input = tf.random.normal(tf.shape(input),
                                         mean=0.0,
                                         stddev=self.input_stddev)
            else:
                raise ValueError

        # Standardize Input
        if self.center_input:
            input = self._center(input)
        if self.normalize_input:
            input = self._normalize(input)

        forward_input = tf.identity(input, name='forward_input')
        backward_input = tf.identity(input, name='backward_input')
        if self.batch_center_backward_input:
            backward_input = self._batch_center(backward_input)

        # Compute Projections
        if layer_type == 'fc':
            forward_projection = tf.matmul(forward_input, forward_kernel)
            backward_projection = tf.matmul(backward_input, backward_kernel,
                                            transpose_b=True)
            if bias is not None:
                if self.use_bias_forward:
                    forward_projection = tf.nn.bias_add(forward_projection, bias)
                if self.use_bias_backward:
                    backward_projection = tf.nn.bias_add(backward_projection, bias)
        elif layer_type == 'conv':
            forward_projection = tf.nn.conv2d(forward_input,
                                              forward_kernel,
                                              **kwargs)
            backward_projection = tf.nn.conv2d(backward_input,
                                               backward_kernel,
                                               **kwargs)
            if bias is not None:
                if self.use_bias_forward:
                    forward_projection = tf.nn.bias_add(forward_projection,
                                                        bias,
                                                        data_format='NHWC')
                if self.use_bias_backward:
                    backward_projection = tf.nn.bias_add(backward_projection,
                                                         bias,
                                                         data_format='NHWC')
        else:
            raise ValueError

        # Apply Activation
        if self.activation_fn_override is not None:
            activation = activations.get(self.activation_fn_override)
            print("Overriding the default activation function with {}".format(activation))
        if self.activation_forward and (activation is not None):
            forward_projection = activation(forward_projection)
            print("Using activation forward: {}".format(activation))
        if self.activation_backward and (activation is not None):
            backward_projection = activation(backward_projection)
            print("Using activation backward: {}".format(activation))

        # Compute Reconstruction
        if layer_type == 'fc':
            forward_reconstruction = tf.matmul(forward_projection, backward_kernel)
            backward_reconstruction = tf.matmul(backward_projection, forward_kernel,
                                                transpose_b=True)
        elif layer_type == 'conv':
            forward_reconstruction = tf.nn.conv2d_transpose(forward_projection,
                                                            backward_kernel,
                                                            tf.shape(input),
                                                            **kwargs)
            backward_reconstruction = tf.nn.conv2d_transpose(backward_projection,
                                                             forward_kernel,
                                                             tf.shape(input),
                                                             **kwargs)
        else:
            raise ValueError

        # Standardize forward output
        if self.center_forward_output:
            forward_projection = self._center(forward_projection)
            forward_reconstruction = self._center(forward_reconstruction)
        if self.normalize_forward_output:
            forward_projection = self._normalize(forward_projection)
            forward_reconstruction = self._normalize(forward_reconstruction)
        if self.batch_center_forward_output:
            forward_projection = self._batch_center(forward_projection)
            forward_reconstruction = self._batch_center(forward_reconstruction)

        # Standardize backward output
        if self.center_backward_output:
            backward_projection = self._center(backward_projection)
            backward_reconstruction = self._center(backward_reconstruction)
        if self.normalize_backward_output:
            backward_projection = self._normalize(backward_projection)
            backward_reconstruction = self._normalize(backward_reconstruction)

        # Reshape backward kernel
        if layer_type == 'fc':
            backward_kernel = tf.transpose(backward_kernel)
        elif layer_type == 'conv':
            pass
        else:
            raise ValueError

        # Add alignment loss
        loss = self.regularization(input,
                                   forward_kernel,
                                   backward_kernel,
                                   forward_projection,
                                   backward_projection,
                                   forward_reconstruction,
                                   backward_reconstruction)
        tf.add_to_collection('ALIGNMENT_LOSSES', loss)
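The `tf.stop_gradient` calls at the top of this method detach the inputs from the backward pass while leaving forward values untouched. A tiny self-contained reminder of that behaviour:

import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.stop_gradient(x) * x
print(tape.gradient(y, x))  # 3.0, not 6.0: the detached factor contributes no gradient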
Example #13
    def __init__(self,
                 rank,
                 filters,
                 kernel_size,
                 param_reduction=0.5,
                 form='individual',
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 groups=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 conv_op=None,
                 **kwargs):
        super(SzConv, self).__init__(
            trainable=trainable,
            name=name,
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.rank = rank

        if isinstance(filters, float):
            filters = int(filters)
        if filters is not None and filters < 0:
            raise ValueError(f'Received a negative value for `filters`. '
                             f'Was expecting a positive value, got {filters}.')
        self.filters = filters
        self.groups = groups or 1
        self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                      'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
            dilation_rate, rank, 'dilation_rate')

        self.activation = activations.get(activation)
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=self.rank + 2)

        self._validate_init()
        self._is_causal = self.padding == 'causal'
        self._channels_first = self.data_format == 'channels_first'
        self._tf_data_format = conv_utils.convert_data_format(
            self.data_format, self.rank + 2)

        self.reduction_sv = param_reduction
        self.form = form
        self.num_ones = 0
        self.num_weights = 0
        self.reduced_ratio = 0
        self.halfbandwidth = 0
Example #14
    def __init__(self,
                 num_units,
                 use_peepholes=False,
                 cell_clip=None,
                 initializer=None,
                 num_proj=None,
                 proj_clip=None,
                 num_unit_shards=None,
                 num_proj_shards=None,
                 forget_bias=1.0,
                 state_is_tuple=True,
                 activation=None,
                 reuse=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        """Initialize the parameters for an LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      use_peepholes: bool, set True to enable diagonal/peephole connections.
      cell_clip: (optional) A float value, if provided the cell state is clipped
        by this value prior to the cell output activation.
      initializer: (optional) The initializer to use for the weight and
        projection matrices.
      num_proj: (optional) int, The output dimensionality for the projection
        matrices.  If None, no projection is performed.
      proj_clip: (optional) A float value.  If `num_proj > 0` and `proj_clip` is
        provided, then the projected values are clipped elementwise to within
        `[-proj_clip, proj_clip]`.
      num_unit_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      num_proj_shards: Deprecated, will be removed by Jan. 2017.
        Use a variable_scope partitioner instead.
      forget_bias: Biases of the forget gate are initialized by default to 1
        in order to reduce the scale of forgetting at the beginning of
        the training. Must set it manually to `0.0` when restoring from
        CudnnLSTM trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  If False, they are concatenated
        along the column axis.  This latter behavior will soon be deprecated.
      activation: Activation function of the inner states.  Default: `tanh`. It
        could also be a string that is within the Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables
        in an existing scope.  If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will
        share weights, but to avoid mistakes we require reuse=True in such
        cases.
      dtype: Default dtype of the layer (default of `None` means use the type
        of the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().
      When restoring from CudnnLSTM-trained checkpoints, use
      `CudnnCompatibleLSTMCell` instead.
    """
        super(T_LSTMCell, self).__init__(_reuse=reuse,
                                         name=name,
                                         dtype=dtype,
                                         **kwargs)
        if not state_is_tuple:
            logging.warn(
                "%s: Using a concatenated state is slower and will soon be "
                "deprecated.  Use state_is_tuple=True.", self)
        if num_unit_shards is not None or num_proj_shards is not None:
            logging.warn(
                "%s: The num_unit_shards and proj_unit_shards parameters are "
                "deprecated and will be removed in Jan 2017.  "
                "Use a variable scope with a partitioner instead.", self)
        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn(
                "%s: Note that this cell is not optimized for performance. "
                "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
                "performance on GPU.", self)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)

        self._num_units = num_units
        self._use_peepholes = use_peepholes
        self._cell_clip = cell_clip
        self._initializer = initializers.get(initializer)
        self._num_proj = num_proj
        self._proj_clip = proj_clip
        self._num_unit_shards = num_unit_shards
        self._num_proj_shards = num_proj_shards
        self._forget_bias = forget_bias
        self._state_is_tuple = state_is_tuple
        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh

        if num_proj:
            self._state_size = (LSTMStateTuple(num_units, num_proj)
                                if state_is_tuple else num_units + num_proj)
            self._output_size = num_proj
        else:
            self._state_size = (LSTMStateTuple(num_units, num_units)
                                if state_is_tuple else 2 * num_units)
            self._output_size = num_units
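The final branch sizes the state and output: with a projection, the carry state keeps `num_units` while the hidden/output state shrinks to `num_proj`. A hedged restatement of that sizing logic in isolation:

num_units, num_proj = 16, 8
state_size = (num_units, num_proj) if num_proj else (num_units, num_units)  # (c_size, m_size)
output_size = num_proj if num_proj else num_units
print(state_size, output_size)  # (16, 8) 8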
Example #15
 def __init__(self, activation, **kwargs):
   super(Activation, self).__init__(**kwargs)
   self.supports_masking = True
   self.activation = activations.get(activation)
Example #16
    def __init__(self,
                 num_units,
                 initializer=None,
                 input_shape=None,
                 forget_bias=1.0,
                 trainable=True,
                 do_norm=False,
                 useDropout=False,
                 activation=None,
                 reuse=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        """Initialize the parameters for an LSTM cell.
        Args:
          num_units: int, The number of units in the LSTM cell.
          initializer: (optional) The initializer to use for the weight and
            projection matrices.
          forget_bias: Biases of the forget gate are initialized by default to 1
            in order to reduce the scale of forgetting at the beginning of
            the training. Must set it manually to `0.0` when restoring from
            CudnnLSTM trained checkpoints.
          activation: Activation function of the inner states.  Default: `tanh`. It
            could also be a string that is within the Keras activation function names.
          reuse: (optional) Python boolean describing whether to reuse variables
            in an existing scope.  If not `True`, and the existing scope already has
            the given variables, an error is raised.
          name: String, the name of the layer. Layers with the same name will
            share weights, but to avoid mistakes we require reuse=True in such
            cases.
          dtype: Default dtype of the layer (default of `None` means use the type
            of the first input). Required when `build` is called before `call`.
          **kwargs: Dict, keyword named properties for common layer attributes, like
            `trainable` etc when constructing the cell from configs of get_config().
          When restoring from CudnnLSTM-trained checkpoints, use
          `CudnnCompatibleLSTMCell` instead.
        """
        super(STLSTMCell, self).__init__(_reuse=reuse,
                                         name=name,
                                         trainable=trainable,
                                         dtype=dtype,
                                         **kwargs)
        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn(
                "%s: Note that this cell is not optimized for performance. "
                "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
                "performance on GPU.", self)

        # Inputs must be 2-dimensional.
        self.input_spec = base_layer.InputSpec(ndim=2)
        self.do_norm = do_norm
        self.useDropout = useDropout
        self._num_units = num_units
        self._forget_bias = forget_bias
        # self._initializer = initializers.get(initializer)
        self._initializer = initializer

        # self._state_is_tuple = state_is_tuple
        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh

        self._state_size = (LSTMStateTuple(num_units, num_units))
        self._output_size = num_units
        if input_shape is None:
            raise ValueError("Expected inputs_shape to be known")
        else:
            if not self.built:
                self.build(input_shape)
Example #17
 def __init__(self, units, use_bias=True, **kwargs):
     super(LstmCell, self).__init__(**kwargs)
     self.activation = activations.get('tanh')
     self.recurrent_activation = activations.get('hard_sigmoid')
     self.units = units
     self.use_bias = use_bias