Example #1
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            input_dim, input_length = input_shape[1], input_shape[2]
        else:
            input_dim, input_length = input_shape[2], input_shape[1]

        if input_dim is None:
            raise ValueError(
                'Axis 2 of input should be fully-defined. '
                'Found shape:', input_shape)
        self.output_length = conv_utils.conv_output_length(
            input_length, self.kernel_size[0], self.padding, self.strides[0])
        self.kernel_shape = (self.output_length,
                             self.kernel_size[0] * input_dim, self.filters)
        self.kernel = self.add_weight(shape=self.kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.output_length,
                                               self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
        else:
            self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
        self.built = True
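
This build derives the unshared kernel shape from the convolution's output length. A minimal sketch of the arithmetic behind conv_utils.conv_output_length for the two common padding modes (assuming dilation 1; the real helper also handles 'full' and 'causal'):

def conv_output_length(input_length, filter_size, padding, stride):
    # 'valid' only emits positions where the kernel fits entirely;
    # 'same' keeps the input length before striding.
    if input_length is None:
        return None
    if padding == 'same':
        length = input_length
    else:  # 'valid'
        length = input_length - filter_size + 1
    return (length + stride - 1) // stride

print(conv_output_length(10, 3, 'valid', 1))  # 8 output positions
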
Example #2
 def __init__(self,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              time_major=False,
              **kwargs):
     # We invoke the base layer's initializer directly here because we do not
     # want to create RNN cell instance.
     super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
     self.return_sequences = return_sequences
     self.return_state = return_state
     self.go_backwards = go_backwards
     self.stateful = stateful
     self.time_major = time_major
     self.supports_masking = False
     self.input_spec = [InputSpec(ndim=3)]
     if hasattr(self.cell.state_size, '__len__'):
         state_size = self.cell.state_size
     else:
         state_size = [self.cell.state_size]
     self.state_spec = [InputSpec(shape=(None, dim)) for dim in state_size]
     self.constants_spec = None
     self._states = None
     self._num_constants = None
     self._num_inputs = None
     self._vector_shape = constant_op.constant([-1])
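
The hasattr(..., '__len__') test normalizes cell.state_size, which may be a single int or a tuple of ints. A standalone sketch of that pattern (pure Python, no Keras needed):

def normalize_state_size(state_size):
    # Single-state cells report an int; multi-state cells (e.g. LSTM)
    # report a tuple or list. Always return a list of ints.
    if hasattr(state_size, '__len__'):
        return list(state_size)
    return [state_size]

assert normalize_state_size(32) == [32]
assert normalize_state_size((32, 32)) == [32, 32]
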
Example #3
    def __init__(self,
                 feature_units,
                 activation="relu",
                 return_attention=False,
                 node_axis="row",
                 merge_method="add",
                 use_attention_kernel=True,
                 **kwargs):

        super(SimpleAttentionLayer, self).__init__(units=feature_units,
                                                   activation=activation,
                                                   **kwargs)
        if merge_method == "concat" and not use_attention_kernel:
            raise Exception("Can't use concat without attention")

        self.return_attention = return_attention
        self.node_axis = node_axis
        self.merge_method = merge_method
        self.use_attention_kernel = use_attention_kernel
        self.input_spec = [InputSpec(ndim=3), InputSpec(ndim=3)]
        self.supports_masking = False

        self.self_kernel = None
        self.neighbor_kernel = None
        self.attention_kernel = None
        self.bias = None
Example #4
    def build(self, input_shape):
        # Note input_shape will be list of shapes of initial states and
        # constants if these are passed in __call__.
        if self._num_constants is not None:
            constants_shape = input_shape[-self._num_constants:]  # pylint: disable=E1130
        else:
            constants_shape = None

        if isinstance(input_shape, list):
            input_shape = input_shape[0]

        batch_size = input_shape[0] if self.stateful else None
        self.input_spec[0] = InputSpec(shape=(batch_size, None) +
                                       input_shape[2:5])

        # allow cell (if layer) to build before we set or validate state_spec
        if isinstance(self.cell, Layer):
            step_input_shape = (input_shape[0], ) + input_shape[2:]
            if constants_shape is not None:
                self.cell.build([step_input_shape] + constants_shape)
            else:
                self.cell.build(step_input_shape)

        # set or validate state_spec
        if hasattr(self.cell.state_size, '__len__'):
            state_size = list(self.cell.state_size)
        else:
            state_size = [self.cell.state_size]

        if self.state_spec is not None:
            # initial_state was passed in call, check compatibility
            if self.cell.data_format == 'channels_first':
                ch_dim = 1
            elif self.cell.data_format == 'channels_last':
                ch_dim = 3
            if [spec.shape[ch_dim] for spec in self.state_spec] != state_size:
                raise ValueError(
                    'An initial_state was passed that is not compatible with '
                    '`cell.state_size`. Received `state_spec`={}; '
                    'However `cell.state_size` is '
                    '{}'.format([spec.shape for spec in self.state_spec],
                                self.cell.state_size))
        else:
            if self.cell.data_format == 'channels_first':
                self.state_spec = [
                    InputSpec(shape=(None, dim, None, None))
                    for dim in state_size
                ]
            elif self.cell.data_format == 'channels_last':
                self.state_spec = [
                    InputSpec(shape=(None, None, None, dim))
                    for dim in state_size
                ]
        if self.stateful:
            self.reset_states()
        self.built = True
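
The state specs mirror the cell's feature axis: channels_first states are shaped (batch, dim, rows, cols) and channels_last states (batch, rows, cols, dim), with None marking the free dimensions. A small helper sketching just that branch:

def conv_state_shapes(state_size, data_format):
    # One 4D shape template per state tensor; None marks unconstrained dims.
    if data_format == 'channels_first':
        return [(None, dim, None, None) for dim in state_size]
    return [(None, None, None, dim) for dim in state_size]

assert conv_state_shapes([32], 'channels_last') == [(None, None, None, 32)]
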
Example #5
  def __init__(self,
               units,
               kernel_initializer='glorot_uniform',
               recurrent_initializer='orthogonal',
               bias_initializer='zeros',
               unit_forget_bias=True,
               kernel_regularizer=None,
               recurrent_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               recurrent_constraint=None,
               bias_constraint=None,
               return_sequences=False,
               return_state=False,
               go_backwards=False,
               stateful=False,
               time_major=False,
               **kwargs):
    super(RNN, self).__init__(**kwargs)  # pylint: disable=bad-super-call
    self.units = units
    cell_spec = collections.namedtuple('cell', ['state_size', 'output_size'])
    self.cell = cell_spec(
        state_size=(self.units, self.units), output_size=self.units)

    self.kernel_initializer = initializers.get(kernel_initializer)
    self.recurrent_initializer = initializers.get(recurrent_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.unit_forget_bias = unit_forget_bias

    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)

    self.kernel_constraint = constraints.get(kernel_constraint)
    self.recurrent_constraint = constraints.get(recurrent_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.time_major = time_major
    self._num_constants = None
    self._num_inputs = None
    self._states = None
    self.input_spec = [InputSpec(ndim=3)]
    self.state_spec = [
        InputSpec(shape=(None, dim)) for dim in (self.units, self.units)
    ]
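
Because this layer subclasses RNN but manages its own weights, it fakes the cell attribute with a namedtuple exposing just the two fields the base class reads. The trick in isolation:

import collections

CellSpec = collections.namedtuple('cell', ['state_size', 'output_size'])
cell = CellSpec(state_size=(64, 64), output_size=64)
assert hasattr(cell.state_size, '__len__')  # satisfies the RNN base checks
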
Example #6
    def __init__(
            self,
            feature_units,
            attn_heads=1,
            attn_heads_reduction="concat",  # {"concat", "average"}
            dropout_rate=0.5,
            activation="relu",
            attn_kernel_initializer="glorot_uniform",
            attn_kernel_regularizer=None,
            attn_kernel_constraint=None,
            attention=True,
            return_attention=False,
            **kwargs):

        if attn_heads_reduction not in {"concat", "average"}:
            raise ValueError("Possbile reduction methods: concat, average")

        super(GraphAttentionLayer, self).__init__(units=feature_units,
                                                  activation=activation,
                                                  **kwargs)

        # Number of attention heads (K in the paper)
        self.attn_heads = attn_heads
        # Eq. 5 and 6 in the paper
        self.attn_heads_reduction = attn_heads_reduction
        # Internal dropout rate
        self.dropout_rate = dropout_rate

        self.attn_kernel_initializer \
            = initializers.get(attn_kernel_initializer)
        self.attn_kernel_regularizer \
            = regularizers.get(attn_kernel_regularizer)
        self.attn_kernel_constraint = constraints.get(attn_kernel_constraint)
        self.attention = attention
        self.return_attention = return_attention
        self.input_spec = [InputSpec(ndim=3), InputSpec(ndim=3)]
        self.supports_masking = False

        # Populated by build()
        self.kernels = []
        self.biases = []
        self.neighbor_kernels = []
        self.attn_kernels = []
        self.attention_biases = []

        if attn_heads_reduction == "concat":
            # Output will have shape (..., K * F")
            self.output_dim = self.units * self.attn_heads
        else:
            # Output will have shape (..., F")
            self.output_dim = self.units
Example #7
    def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
        inputs, initial_state, constants = _standardize_args(
            inputs, initial_state, constants, self._num_constants)

        if initial_state is None and constants is None:
            return super(STLSTM2D, self).__call__(inputs, **kwargs)

        # If any of `initial_state` or `constants` are specified and are Keras
        # tensors, then add them to the inputs and temporarily modify the
        # input_spec to include them.

        additional_inputs = []
        additional_specs = []
        if initial_state is not None:
            kwargs['initial_state'] = initial_state
            additional_inputs += initial_state
            self.state_spec = []
            for state in initial_state:
                shape = K.int_shape(state)
                self.state_spec.append(InputSpec(shape=shape))

            additional_specs += self.state_spec
        if constants is not None:
            kwargs['constants'] = constants
            additional_inputs += constants
            self.constants_spec = [
                InputSpec(shape=K.int_shape(constant))
                for constant in constants
            ]
            self._num_constants = len(constants)
            additional_specs += self.constants_spec
        # at this point additional_inputs cannot be empty
        for tensor in additional_inputs:
            if K.is_keras_tensor(tensor) != K.is_keras_tensor(
                    additional_inputs[0]):
                raise ValueError('The initial state or constants of an RNN'
                                 ' layer cannot be specified with a mix of'
                                 ' Keras tensors and non-Keras tensors')

        if K.is_keras_tensor(additional_inputs[0]):
            # Compute the full input spec, including state and constants
            full_input = [inputs] + additional_inputs
            full_input_spec = self.input_spec + additional_specs
            # Perform the call with temporarily replaced input_spec
            original_input_spec = self.input_spec
            self.input_spec = full_input_spec
            output = super(STLSTM2D, self).__call__(full_input, **kwargs)
            self.input_spec = original_input_spec
            return output
        else:
            return super(STLSTM2D, self).__call__(inputs, **kwargs)
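
The temporary input_spec swap around super().__call__ is easy to get wrong if the call raises, since the original spec would never be restored. A hedged sketch of the same idea as a context manager (not part of the original code):

import contextlib

@contextlib.contextmanager
def swapped_input_spec(layer, full_spec):
    # Temporarily widen layer.input_spec, restoring it even on error.
    original = layer.input_spec
    layer.input_spec = full_spec
    try:
        yield
    finally:
        layer.input_spec = original
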
Example #8
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            input_dim, input_length = input_shape[1], input_shape[2]
        else:
            input_dim, input_length = input_shape[2], input_shape[1]

        if input_dim is None:
            raise ValueError(
                'Axis 2 of input should be fully-defined. '
                'Found shape:', input_shape)
        self.output_length = self.mask.shape[1]
        # print("output length is " + str(self.output_length))
        if self.data_format == 'channels_first':
            self.kernel_shape = (input_dim, input_length, self.filters,
                                 self.output_length)
        else:
            self.kernel_shape = (input_length, input_dim, self.output_length,
                                 self.filters)

        self.kernel = self.add_weight(
            shape=(len(self.mask.data),
                   ),  # one weight per nonzero entry in the sparse mask
            initializer=self.kernel_initializer,
            name='kernel',
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)

        self.kernel_mask = get_locallyDirected1D_mask(
            self.mask,
            self.kernel,
            data_format=self.data_format,
            dtype=self.kernel.dtype)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.output_length,
                                               self.filters),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        if self.data_format == 'channels_first':
            self.input_spec = InputSpec(ndim=3, axes={1: input_dim})
        else:
            self.input_spec = InputSpec(ndim=3, axes={-1: input_dim})
        self.built = True
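
len(self.mask.data) suggests mask is a scipy.sparse matrix, whose .data array holds exactly the nonzero entries, so the layer learns one scalar weight per allowed connection. A quick illustration under that assumption:

import numpy as np
from scipy import sparse

mask = sparse.coo_matrix(np.array([[1, 0, 1],
                                   [0, 1, 0]]))
print(len(mask.data))  # 3 -> three trainable connection weights
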
Example #9
    def build(self, input_shape):
        """
        Reimplementation of tf.keras.layers.Conv3D creating multiple conv ops
        with varying degree of dilation with shared weights
        """
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            self.channel_axis = 1
        else:
            self.channel_axis = -1
        if input_shape[self.channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[self.channel_axis])

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={self.channel_axis: input_dim})

        # Prepare weights and conv ops for dilated conv layers
        with tf.name_scope("dilated_conv_weights"):
            self.build_dilated_conv(input_shape, input_dim)

        # Prepare weights and conv ops for attention mechanism
        with tf.name_scope("attention_weights"):
            self.build_attention(input_shape, input_dim)

        # Set build flag, needed if called directly
        self.built = True
Example #10
 def build(self, input_shape):
     dtype = dtypes.as_dtype(self.dtype or K.floatx())
     if not (dtype.is_floating or dtype.is_complex):
         raise TypeError(
             'Unable to build `Dense` layer with non-floating point '
             'dtype %s' % (dtype, ))
     input_shape = tensor_shape.TensorShape(input_shape)
     if input_shape[-1].value is None:
         raise ValueError('The last dimension of the inputs to `Dense` '
                          'should be defined. Found `None`.')
     self.input_spec = InputSpec(min_ndim=2,
                                 axes={-1: input_shape[-1].value})
     self.kernel = self.add_weight(
         'kernel',
         shape=[input_shape[-1].value, self.units],
         initializer=self.kernel_initializer,
         regularizer=self.kernel_regularizer,
         constraint=self.kernel_constraint,
         dtype=self.dtype,
         trainable=True)
     if self.use_bias:
         self.bias = self.add_weight('bias',
                                     shape=[
                                         self.units,
                                     ],
                                     initializer=self.bias_initializer,
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint,
                                     dtype=self.dtype,
                                     trainable=True)
     else:
         self.bias = None
     self.built = True
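
InputSpec(min_ndim=2, axes={-1: input_dim}) records a contract that later calls are checked against: at least two dimensions, with the last one fixed. A pure-Python sketch of that check (the real validation lives in Keras' input-compatibility assertions):

def check_spec(shape, min_ndim, axes):
    # shape is a tuple with None for unknown dimensions.
    if len(shape) < min_ndim:
        raise ValueError('expected min_ndim=%d, got shape %s' % (min_ndim, shape))
    for axis, value in axes.items():
        if shape[axis] is not None and shape[axis] != value:
            raise ValueError('axis %d expected %d, got %s' % (axis, value, shape[axis]))

check_spec((32, 8), min_ndim=2, axes={-1: 8})  # passes silently
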
Example #11
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(GraphConvolutionalLayer, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)

        self.units = int(units)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(min_ndim=3)
Example #12
 def __init__(self,
              cell,
              return_sequences=False,
              return_state=False,
              go_backwards=False,
              stateful=False,
              unroll=False,
              **kwargs):
   if unroll:
     raise TypeError('Unrolling isn\'t possible with '
                     'convolutional RNNs.')
   if isinstance(cell, (list, tuple)):
     # The StackedConvRNN2DCells isn't implemented yet.
      raise TypeError('It is not possible at the moment to '
                      'stack convolutional cells.')
   super(ConvRNN2D, self).__init__(cell,
                                   return_sequences,
                                   return_state,
                                   go_backwards,
                                   stateful,
                                   unroll,
                                   **kwargs)
   self.input_spec = [InputSpec(ndim=5)]
   self.states = None
   self._num_constants = None
Example #13
    def __init__(self, output_dim, init='glorot_uniform',
                 init_sigma=lambda shape, name: init_uniform(shape, -10, -5, name=name),
                 activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, W_sigma_regularizer=None, b_sigma_regularizer=None,
                 activity_regularizer=None,
                 bias=True, input_dim=None, **kwargs):
        self.init = initializers.get(init)
        self.init_sigma = init_sigma
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim
        self.uses_learning_phase = True

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_sigma_regularizer = regularizers.get(W_sigma_regularizer)
        self.b_sigma_regularizer = regularizers.get(b_sigma_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(BayesianDense, self).__init__(**kwargs)
Example #14
  def __init__(self,
               units,
               activation=None,
               use_bias=True,
               kernel_initializer='glorot_uniform',
               bias_initializer='zeros',
               kernel_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               kernel_constraint=None,
               bias_constraint=None,
               **kwargs):
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super(Dense, self).__init__(
        activity_regularizer=regularizers.get(activity_regularizer), **kwargs)
    self.units = int(units)
    self.activation = activations.get(activation)
    self.use_bias = use_bias
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
    self.kernel_regularizer = regularizers.get(kernel_regularizer)
    self.bias_regularizer = regularizers.get(bias_regularizer)
    self.kernel_constraint = constraints.get(kernel_constraint)
    self.bias_constraint = constraints.get(bias_constraint)

    self.supports_masking = True
    self.input_spec = InputSpec(min_ndim=2)
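
The input_dim kwarg is legacy sugar: Dense(units, input_dim=d) is rewritten to input_shape=(d,) before the base initializer sees it. The rewrite on its own:

def canonicalize_kwargs(kwargs):
    # Mirrors the 'input_dim' -> 'input_shape' translation above.
    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
        kwargs['input_shape'] = (kwargs.pop('input_dim'),)
    return kwargs

assert canonicalize_kwargs({'input_dim': 16}) == {'input_shape': (16,)}
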
Example #15
 def build(self, input_shape):
   input_shape = tensor_shape.TensorShape(input_shape)
   if input_shape[-1].value is None:
     raise ValueError('The last dimension of the inputs to `Dense` '
                      'should be defined. Found `None`.')
   self.input_spec = InputSpec(min_ndim=2,
                               axes={-1: input_shape[-1].value})
   self.kernel = self.add_weight(
       'kernel',
       shape=[input_shape[-1].value, self.units],
       initializer=self.kernel_initializer,
       regularizer=self.kernel_regularizer,
       constraint=self.kernel_constraint,
       dtype=self.dtype,
       trainable=True)
   if self.use_bias:
     self.bias = self.add_weight(
         'bias',
         shape=[self.units,],
         initializer=self.bias_initializer,
         regularizer=self.bias_regularizer,
         constraint=self.bias_constraint,
         dtype=self.dtype,
         trainable=True)
   else:
     self.bias = None
   self.built = True
Example #16
    def __init__(self,
                 tied_layer='',
                 activation=None,
                 use_bias=True,
                 bias_initializer='zeros',
                 bias_regularizer=None,
                 activity_regularizer=None,
                 bias_constraint=None,
                 varName='',
                 varShape=[],
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(DenseTied, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        if tied_layer != '':
            self.kernelFrom = tied_layer.kernel.name
        self.varName = varName
        self.varShape = varShape
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
Example #17
    def __init__(self,
                 num_units,
                 activation=None,
                 reuse=None,
                 kernel_initializer=None,
                 bias_initializer=None,
                 name=None,
                 dtype=None,
                 **kwargs):
        super(GRUCell, self).__init__(_reuse=reuse,
                                      name=name,
                                      dtype=dtype,
                                      **kwargs)
        #_check_supported_dtypes(self.dtype)

        if context.executing_eagerly() and context.num_gpus() > 0:
            logging.warn("This is not optimized for performance.")
        self.input_spec = InputSpec(ndim=2)
        self._num_units = num_units
        if activation:
            self._activation = activations.get(activation)
        else:
            self._activation = math_ops.tanh
        self._kernel_initializer = initializers.get(kernel_initializer)
        self._bias_initializer = initializers.get(bias_initializer)
Example #18
    def __init__(
            self,
            units,
            activation=None,
            use_bias=True,
            # kernel_initializer='glorot_uniform',
            bias_initializer='zeros',
            # kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            # kernel_constraint=None,
            bias_constraint=None,
            tied_to=None,
            **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super(DenseTied, self).__init__(
            activity_regularizer=regularizers.get(activity_regularizer),
            **kwargs)
        self.tied_to = tied_to
        self.units = int(units)
        self.activation = activations.get(activation)
        """transposed weights are variables and don't use any regularizators"""

        # self.kernel_initializer = None
        # self.kernel_constraint = None  #regularizers.get(kernel_regularizer)
        # self.kernel_regularizer = None #constraints.get(kernel_constraint)
        """biases are still initialized and regularized"""
        self.use_bias = use_bias
        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)
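
A tied dense layer reuses another layer's kernel transposed instead of creating its own, which is why the kernel initializer, regularizer, and constraint are commented out above. A hedged numpy sketch of the forward pass (assuming the encoder kernel has shape [inputs, units]):

import numpy as np

def dense_tied_forward(x, encoder_kernel, bias=None):
    # The decoder applies the encoder kernel transposed: [units, inputs].
    y = x @ encoder_kernel.T
    if bias is not None:
        y = y + bias
    return y

x = np.ones((2, 4))               # batch of 2 with 4 'code' features
encoder_kernel = np.ones((8, 4))  # encoder maps 8 inputs -> 4 units
print(dense_tied_forward(x, encoder_kernel).shape)  # (2, 8)
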
Example #19
    def build(self, input_shape):
        ndim = len(input_shape)
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')

        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')

        self.input_spec = InputSpec(ndim=ndim)

        if self.axis is None:
            shape = (1, )
        else:
            shape = (input_shape[self.axis], )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.built = True
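
Once built, gamma and beta are broadcast against the input along the chosen axis to scale and re-center it. A numpy sketch of the resulting affine transform (assuming axis=-1):

import numpy as np

def scale_and_center(x, gamma, beta):
    # gamma/beta have shape (x.shape[-1],) and broadcast over leading dims.
    return x * gamma + beta

x = np.random.randn(2, 5)
print(scale_and_center(x, np.ones(5), np.zeros(5)).shape)  # (2, 5)
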
Example #20
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if len(input_shape) != 4:
            raise ValueError('Inputs should have rank 4. Received input '
                             'shape: ' + str(input_shape))
        channel_axis = self._get_channel_axis()
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = int(input_shape[channel_axis])
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        kernel_shape = self.kernel_size + (self.filters, input_dim)

        if self.tied_to is not None:
            self.kernel = self.tied_to.kernel
            self._non_trainable_weights.append(self.kernel)
        else:
            self.kernel = self.add_weight(name='kernel',
                                          shape=kernel_shape,
                                          initializer=self.kernel_initializer,
                                          regularizer=self.kernel_regularizer,
                                          constraint=self.kernel_constraint,
                                          trainable=True,
                                          dtype=self.dtype)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None
        self.built = True
Example #21
    def __init__(self, kernel_size=3, activation='elu'):
        super().__init__()
        self.kernel_size = kernel_size
        self.activation = activation

        self._input_spec = InputSpec(ndim=3)
        self.supports_masking = True
Example #22
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(
            input_shape)  # The input should be [batch_size x N x units]
        if (len(input_shape) != 3 or input_shape[-1].value is None
                or input_shape[-2].value is None):
            raise ValueError(
                'The last two dimensions of the GCN input are wrong: {}. '
                'Input should be [batch_size x N x units]'.format(input_shape))
        self.input_spec = InputSpec(min_ndim=3,
                                    axes={
                                        -1: input_shape[-1].value,
                                        -2: input_shape[-2].value
                                    })

        self.kernel = self.add_weight(
            'kernel',
            shape=[input_shape[-1].value, self.units],
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint,
            dtype=self.dtype,
            trainable=True)

        if self.use_bias:
            self.bias = self.add_weight('bias',
                                        shape=[
                                            self.units,
                                        ],
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        dtype=self.dtype,
                                        trainable=True)
Example #23
 def __init__(self,
              mask,
              filters,
              padding='valid',
              data_format=None,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(LocallyDirected1D, self).__init__(**kwargs)
     self.filters = filters
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=3)
     self.mask = mask
Example #24
    def build(self, input_shape):
        """Build `Layer`"""
        input_shape = tensor_shape.TensorShape(input_shape).as_list()
        self.input_spec = InputSpec(shape=input_shape)

        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = False

            if not hasattr(self.layer, 'kernel'):
                raise ValueError('`WeightNormalization` must wrap a layer that'
                                 ' contains a `kernel` for weights')

            # The kernel's filter or unit dimension is -1
            self.layer_depth = int(self.layer.kernel.shape[-1])
            self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1))

            self.layer.v = self.layer.kernel
            self.layer.g = self.layer.add_variable(
                name="g",
                shape=(self.layer_depth, ),
                initializer=initializers.get('ones'),
                dtype=self.layer.kernel.dtype,
                trainable=True,
                aggregation=tf_variables.VariableAggregation.MEAN)

            with ops.control_dependencies(
                [self.layer.g.assign(self._init_norm(self.layer.v))]):
                self._compute_weights()

            self.layer.built = True

        super(WeightNormalization, self).build()
        self.built = True
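
Weight normalization reparameterizes the kernel as w = g * v / ||v||, with the norm taken over every axis except the output one; that is what the per-output-unit g variable above feeds into. A numpy sketch of the decomposition:

import numpy as np

def weight_norm(v, g):
    # v: raw kernel, g: per-output-unit scale; normalize over leading axes.
    norm = np.sqrt((v ** 2).sum(axis=tuple(range(v.ndim - 1)), keepdims=True))
    return g * v / norm

v = np.random.randn(3, 3, 16, 32)
w = weight_norm(v, np.ones(32))
print(np.allclose(np.sqrt((w ** 2).sum(axis=(0, 1, 2))), 1.0))  # True
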
Example #25
    def build(self, input_shape):
        input_shape = tensor_shape.TensorShape(input_shape)
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape.dims[channel_axis].value is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        input_dim = int(input_shape[channel_axis])
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.wn_g = self.add_weight(
            name='wn_g',
            shape=(self.filters, ),
            initializer=tf.keras.initializers.RandomUniform(1, 1),
            trainable=True,
            dtype=self.dtype)

        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint,
                                      trainable=True,
                                      dtype=self.dtype)

        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint,
                                        trainable=True,
                                        dtype=self.dtype)
        else:
            self.bias = None

        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})

        if self.padding == 'causal':
            op_padding = 'valid'
        else:
            op_padding = self.padding
        if not isinstance(op_padding, (list, tuple)):
            op_padding = op_padding.upper()

        self._convolution_op = nn_ops.Convolution(
            input_shape,
            filter_shape=self.kernel.shape,
            dilation_rate=self.dilation_rate,
            strides=self.strides,
            padding=op_padding,
            data_format=conv_utils.convert_data_format(self.data_format,
                                                       self.rank + 2))

        self.built = True
Example #26
 def __init__(self, dims, **kwargs):
   super(Permute, self).__init__(**kwargs)
   self.dims = tuple(dims)
   if sorted(dims) != list(range(1, len(dims) + 1)):
     raise ValueError(
         'Invalid permutation `dims` for Permute Layer: %s. '
         'The set of indices in `dims` must be consecutive and start from 1.' %
         (dims,))
   self.input_spec = InputSpec(ndim=len(self.dims) + 1)
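
The sorted(dims) check guarantees dims is a permutation of the non-batch axes (axis 0, the batch, is implicit and never moved). A sketch of the corresponding output-shape computation:

def permute_output_shape(input_shape, dims):
    # input_shape includes the batch axis; dims index the rest from 1.
    return tuple([input_shape[0]] + [input_shape[d] for d in dims])

assert permute_output_shape((None, 10, 64), (2, 1)) == (None, 64, 10)
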
Example #27
 def __init__(self, rate, data_format=None, **kwargs):
   super(SpatialDropout3D, self).__init__(rate, **kwargs)
   if data_format is None:
     data_format = K.image_data_format()
   if data_format not in {'channels_last', 'channels_first'}:
     raise ValueError('data_format must be in '
                      '{"channels_last", "channels_first"}')
   self.data_format = data_format
   self.input_spec = InputSpec(ndim=5)
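
The ndim=5 spec matches volumetric inputs; spatial dropout then drops whole feature maps by broadcasting a single dropout decision over the spatial axes. A sketch of the noise shape this implies:

def spatial_dropout3d_noise_shape(input_shape, data_format):
    # Entire channels are kept or dropped together.
    if data_format == 'channels_first':
        return (input_shape[0], input_shape[1], 1, 1, 1)
    return (input_shape[0], 1, 1, 1, input_shape[4])

assert spatial_dropout3d_noise_shape((8, 4, 4, 4, 16), 'channels_last') == (8, 1, 1, 1, 16)
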
Example #28
    def __init__(self, rank,
                 lgroups,
                 lfilters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 data_format=None,
                 dilation_rate=1,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 trainable=True,
                 name=None,
                 **kwargs):
        super(_GroupConv, self).__init__(
                trainable=trainable,
                name=name,
                activity_regularizer=regularizers.get(activity_regularizer),
                **kwargs)
        self.rank = rank
        if rank > 2:
            raise ValueError('The quick group convolution does not support 3D or higher dimensions.')
        initRank = rank
        self.lgroups = lgroups
        self.lfilters = lfilters
        self.kernel_size = conv_utils.normalize_tuple(
                kernel_size, rank, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if (self.padding == 'causal' and not isinstance(self, (Conv1D, SeparableConv1D))):
            raise ValueError('Causal padding is only supported for `Conv1D` and `SeparableConv1D`.')
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.dilation_rate = conv_utils.normalize_tuple(
             dilation_rate, rank, 'dilation_rate')
        if rank == 1: # when rank=1, expand the tuples to 2D case.
            self.kernel_size = (1, *self.kernel_size)
            self.strides = (1, *self.strides)
            self.dilation_rate = (1, *self.dilation_rate)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=self.rank + 2)

        self.group_input_dim = None
        self.exp_dim_pos = None
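
Expanding the 1D hyperparameters to 2D lets the layer route everything through 2D convolution kernels: a kernel of (k,) becomes (1, k), and likewise for strides and dilation. The expansion on its own:

def expand_1d_to_2d(kernel_size, strides, dilation_rate):
    # Prepend a unit dimension so a 1D conv is expressed as a 1xK 2D conv.
    return (1, *kernel_size), (1, *strides), (1, *dilation_rate)

assert expand_1d_to_2d((3,), (1,), (1,)) == ((1, 3), (1, 1), (1, 1))
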
Example #29
 def build(self, input_shape):
     if self.data_format == 'channels_last':
         input_row, input_col = input_shape[1:-1]
         input_filter = input_shape[3]
     else:
         input_row, input_col = input_shape[2:]
         input_filter = input_shape[1]
     if input_row is None or input_col is None:
          raise ValueError('The spatial dimensions of the inputs to '
                           'a LocallyConnected2D layer '
                           'should be fully-defined, but layer received '
                           'the inputs shape ' + str(input_shape))
     output_row = conv_utils.conv_output_length(input_row,
                                                self.kernel_size[0],
                                                self.padding,
                                                self.strides[0])
     output_col = conv_utils.conv_output_length(input_col,
                                                self.kernel_size[1],
                                                self.padding,
                                                self.strides[1])
     self.output_row = output_row
     self.output_col = output_col
     self.kernel_shape = (output_row * output_col, self.kernel_size[0] *
                          self.kernel_size[1] * input_filter, self.filters)
     self.kernel = self.add_weight(shape=self.kernel_shape,
                                   initializer=self.kernel_initializer,
                                   name='kernel',
                                   regularizer=self.kernel_regularizer,
                                   constraint=self.kernel_constraint)
     if self.use_bias:
         self.bias = self.add_weight(shape=(output_row, output_col,
                                            self.filters),
                                     initializer=self.bias_initializer,
                                     name='bias',
                                     regularizer=self.bias_regularizer,
                                     constraint=self.bias_constraint)
     else:
         self.bias = None
     if self.data_format == 'channels_first':
         self.input_spec = InputSpec(ndim=4, axes={1: input_filter})
     else:
         self.input_spec = InputSpec(ndim=4, axes={-1: input_filter})
     self.built = True
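
Unlike a Conv2D kernel of shape (kh, kw, in_channels, filters), the locally connected kernel above carries an extra leading axis of output_row * output_col unshared positions, which is why these layers are so parameter-hungry. A quick size comparison with hypothetical numbers:

kh, kw, cin, filters = 3, 3, 16, 32
out_r, out_c = 26, 26                   # e.g. 28x28 input, 'valid', stride 1
conv_params = kh * kw * cin * filters   # 4,608 shared weights
local_params = (out_r * out_c) * (kh * kw * cin) * filters  # ~3.1M unshared
print(conv_params, local_params)
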
Example #30
 def build(self, input_shape):
     input_shape = tensor_shape.TensorShape(input_shape).as_list()
     assert len(input_shape) >= 3
     self.input_spec = InputSpec(shape=input_shape)
     child_input_shape = [input_shape[0]] + input_shape[2:]
     if not self.layer.built:
         self.layer.build(child_input_shape)
         self.layer.built = True
     super(TimeDistributed, self).build()
     self.built = True
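
TimeDistributed builds its inner layer on the input shape with the time axis removed, so the wrapped layer never sees the sequence dimension. The shape bookkeeping in isolation:

def time_distributed_child_shape(input_shape):
    # (batch, time, ...) -> (batch, ...)
    return [input_shape[0]] + list(input_shape[2:])

assert time_distributed_child_shape([None, 10, 32]) == [None, 32]
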