Example #1
 def build_with_input(self, input: Union[Nested[tf.TensorSpec],
                                         Nested[tf.Tensor]], *args: Any,
                      **kwargs: Any) -> None:
     bd = self.batch_dims
     self._input_spec = tf.nest.map_structure(
         lambda x: layers.InputSpec(shape=([None] * bd + x.shape[bd:])
                                    [:x.shape.ndims],
                                    dtype=x.dtype), input)
     dummy_input = tf.nest.map_structure(
         lambda x: tf.zeros((list(self.min_batch_shape) + x.shape[bd:])
                            [:x.shape.ndims], x.dtype), input)
     if 'mask' in kwargs:
         kwargs['mask'] = tf.ones(self.min_batch_shape, tf.bool)
     kwargs['training'] = False
     dummy_output = super().__call__(
         dummy_input, *args,
         **kwargs)  # type: ignore[misc]  # mypy/issues/5887
     # if isinstance(tf.nest.flatten(dummy_output)[0], tf.Tensor):
     if isinstance(dummy_output, tfd.Distribution):
         self._output_spec = layers.InputSpec(
             shape=[None] * len(dummy_output.batch_shape) +
             dummy_output.event_shape,
             dtype=dummy_output.dtype)
     else:
         self._output_spec = tf.nest.map_structure(
             lambda x: layers.InputSpec(shape=[None] * bd + x.shape[bd:],
                                        dtype=x.dtype), dummy_output)
     self.built_with_input = True
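A hedged usage sketch of the pattern above: the layer derives its input and output specs from a dummy forward pass, so it can be built once from a spec before real data arrives. SpecNetwork, its configuration, and the spec shape below are illustrative assumptions, not part of the source.

    # Hypothetical subclass of the mixin above, with batch_dims=1 and
    # min_batch_shape=(1,); only build_with_input comes from the source.
    net = SpecNetwork()
    net.build_with_input(tf.TensorSpec(shape=(None, 8), dtype=tf.float32))
    assert net.built_with_input   # set on the method's last line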
Example #2
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='valid',
                 dilation_rate=(1, 1),
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 demod=True,
                 **kwargs):

        super(ModulatedConv2D, self).__init__(**kwargs)

        self.filters = filters
        self.rank = 2
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding
        self.dilation_rate = dilation_rate
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.demod = demod
        self.input_spec = [layers.InputSpec(ndim=4), layers.InputSpec(ndim=2)]
        self.epsilon = 1e-8
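The two-element input_spec declares a pair of inputs: 4-D feature maps and a 2-D per-sample style vector, as in StyleGAN2's modulated convolution. A minimal call sketch under the assumption that the class also implements build() and call() (not shown here):

    x = tf.random.normal((4, 32, 32, 64))   # NHWC feature maps, ndim=4
    w = tf.random.normal((4, 512))          # style vectors, ndim=2
    y = ModulatedConv2D(filters=128, kernel_size=3)([x, w])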
Example #3
    def build(self, *args, **kwargs):
        assert not self.built

        super().build(*args, **kwargs)
        self.input_spec = [
            self.input_spec,
            layers.InputSpec(dtype=self.kernel.dtype, shape=self.kernel.shape),
            layers.InputSpec(dtype=self.bias.dtype, shape=self.bias.shape)
        ]
Example #4
 def build_with_input(self, input: Nested[tf.Tensor], *args: Any,
                      **kwargs: Any) -> None:
     bd = self.batch_dims
     self._input_spec = tf.nest.map_structure(
         lambda x: tfkl.InputSpec(shape=[None] * bd + x.shape[bd:],
                                  dtype=x.dtype), input)
     dummy_input = tf.nest.map_structure(
         lambda t: tf.zeros([2] * bd + t.shape[bd:], t.dtype), input)
     dummy_output = super().__call__(dummy_input, *args, **kwargs)
     self._output_spec = tf.nest.map_structure(
         lambda x: tfkl.InputSpec(shape=[None] * bd + x.shape[bd:],
                                  dtype=x.dtype), dummy_output)
     self.built_with_input = True
Example #5
 def __init__(self, units,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              tied_to=None,
              **kwargs):
     self.tied_to = tied_to
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super().__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = layers.InputSpec(min_ndim=2)
     self.supports_masking = True
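The tied_to argument lets this layer reuse another Dense layer's kernel, transposed (the matching build() appears in Example #24). A hedged autoencoder sketch; the class name TiedDense is an assumption:

    inputs = layers.Input(shape=(784,))
    encoder = layers.Dense(32, activation='relu')
    decoder = TiedDense(784, tied_to=encoder)   # reuses encoder.kernel, transposed
    outputs = decoder(encoder(inputs))          # only decoder's bias adds parameters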
Example #6
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
            
        self.u = tf.Variable(
            tf.random.normal((1, self.kernel.shape.as_list()[-1]),
                             dtype=tf.float32),
            aggregation=tf.VariableAggregation.MEAN,
            trainable=False)
        
        # Set input spec.
        self.input_spec = layers.InputSpec(ndim=self.rank + 2,
                                           axes={channel_axis: input_dim})
        self.built = True
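The non-trainable u above is the running estimate of the kernel's leading left-singular vector used by spectral normalization. A sketch of the power-iteration step that typically consumes it in call() (an assumption; the original call() is not shown):

        # Flatten the kernel to 2-D: (kh * kw * in_dim, filters).
        w = tf.reshape(self.kernel, [-1, self.kernel.shape.as_list()[-1]])
        # One power-iteration step refreshes u and estimates sigma.
        v = tf.math.l2_normalize(tf.matmul(self.u, w, transpose_b=True))
        u = tf.math.l2_normalize(tf.matmul(v, w))
        sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)
        self.u.assign(u)
        kernel_sn = self.kernel / sigma   # spectrally normalized kernel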
Example #7
    def build(self, input_shape):
        ndim = len(input_shape)
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')

        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')

        self.input_spec = layers.InputSpec(ndim=ndim)

        if self.axis is None:
            shape = (1, )
        else:
            shape = (input_shape[self.axis], )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.built = True
Example #8
 def __init__(self, pad=1, **kwargs):
     if isinstance(pad, int):
         self.pad = (pad, pad)
     else:
         self.pad = tuple(pad)
     self.input_spec = [layers.InputSpec(ndim=4)]
     super(ReflectionPadding2D, self).__init__(**kwargs)
Example #9
    def build(self, input_shape):

        kernel_shape = self.kernel_size + (input_shape[-1], self.filters)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None

        self.u = self.add_weight(
            shape=tuple([1, self.kernel.shape.as_list()[-1]]),
            initializer=ks.initializers.RandomNormal(0, 1),
            name='sn_u',
            trainable=False)

        self.input_spec = lr.InputSpec(ndim=self.rank + 2,
                                       axes={-1: input_shape[-1]})
        self.built = True
Example #10
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        if input_dim % self.num_group != 0:
            raise ValueError("The channel dimension of input tensor must divided by num_group with no remainders!")

        kernel_shape = self.kernel_size + (input_dim // self.num_group, self.filters)
        self.kernel = self.add_weight(name='kernel',
                                      shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = layers.InputSpec(ndim=self.rank + 2,
                                           axes={channel_axis: input_dim})
        self.channel_num = input_dim
        self.built = True
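build() creates one kernel of shape kernel_size + (input_dim // num_group, filters), so the grouped forward pass must split the input channels and the kernel's output channels in lockstep. A sketch of the corresponding call() logic (an assumption; the original call() is not shown, channels_last assumed):

        x_groups = tf.split(inputs, self.num_group, axis=-1)
        k_groups = tf.split(self.kernel, self.num_group, axis=-1)
        outputs = tf.concat(
            [tf.nn.conv2d(x, k, strides=list(self.strides),
                          padding=self.padding.upper())
             for x, k in zip(x_groups, k_groups)],
            axis=-1)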
Example #11
    def build(self, input_shape=None):
        self.input_spec = layers.InputSpec(shape=input_shape)
        if hasattr(self.layer, 'built') and not self.layer.built:
            self.layer.build(input_shape)

        # initialise p
        self.p_logit = self.add_weight(
            name='p_logit',
            shape=(1, ),
            initializer=tf.initializers.random_uniform(minval=self.init_min,
                                                       maxval=self.init_max,
                                                       dtype=tf.float32),
            dtype=tf.float32,
            trainable=True)
        self.p = tf.nn.sigmoid(self.p_logit[0], name='dropout_rate')
        tf.summary.scalar('dropoutRate', self.p)
        tf.add_to_collection("LAYER_P", self.p)

        # initialise regulariser / prior KL term
        input_dim = int(np.prod(input_shape[1:]))

        weight = self.layer.kernel
        with tf.name_scope('dropout_regularizer'):
            kernel_regularizer = self.weight_regularizer * tf.reduce_sum(
                tf.square(weight)) / (1. - self.p)
            dropout_regularizer = self.p * tf.log(self.p)
            dropout_regularizer += (1. - self.p) * tf.log(1. - self.p)
            dropout_regularizer *= self.dropout_regularizer * input_dim
            regularizer = tf.reduce_sum(kernel_regularizer +
                                        dropout_regularizer)
            # Add the regularisation loss to collection.
            tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
                                 regularizer)
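For reference, the regularizer assembled above is the Concrete Dropout objective of Gal et al.: a weight term weight_regularizer * ||W||^2 / (1 - p) plus a dropout-entropy term dropout_regularizer * input_dim * (p log p + (1 - p) log(1 - p)); the two are summed and added to the graph's regularization-loss collection.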
Example #12
    def build(self, input_shape):
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')

        self.input_spec = KL.InputSpec(ndim=len(input_shape),
                                       axes={self.axis: dim})
        shape = (dim, )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None

        self.moving_mean = self.add_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)

        self.moving_variance = self.add_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)

        self.mean_weights = self.add_weight(
            shape=(3, ),
            name='mean_weights',
            initializer=self.mean_weights_initializer,
            regularizer=self.mean_weights_regularizer,
            constraint=self.mean_weights_constraints)

        self.variance_weights = self.add_weight(
            shape=(3, ),
            name='variance_weights',
            initializer=self.variance_weights_initializer,
            regularizer=self.variance_weights_regularizer,
            constraint=self.variance_weights_constraints)

        self.built = True
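A design note, inferred from the weights rather than stated in the snippet: the (3,)-shaped mean_weights and variance_weights learn to mix three candidate statistics for mean and variance, which is the Switchable Normalization pattern (batch, instance, and layer statistics combined per layer); the call() that would confirm this is not shown.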
Example #13
    def __init__(self,
                 units,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=1,
                 return_sequences=False,
                 return_state=False,
                 go_backwards=False,
                 stateful=False,
                 unroll=False,
                 **kwargs):

        cell = LSTMCell(units,
                        activation=activation,
                        recurrent_activation=recurrent_activation,
                        use_bias=use_bias,
                        kernel_initializer=kernel_initializer,
                        recurrent_initializer=recurrent_initializer,
                        unit_forget_bias=unit_forget_bias,
                        bias_initializer=bias_initializer,
                        kernel_regularizer=kernel_regularizer,
                        recurrent_regularizer=recurrent_regularizer,
                        bias_regularizer=bias_regularizer,
                        kernel_constraint=kernel_constraint,
                        recurrent_constraint=recurrent_constraint,
                        bias_constraint=bias_constraint,
                        dropout=dropout,
                        recurrent_dropout=recurrent_dropout,
                        implementation=implementation,
                        dtype=kwargs.get('dtype'),
                        trainable=kwargs.get('trainable', True))
        keras_layers.RNN.__init__(self,
                                  cell,
                                  return_sequences=return_sequences,
                                  return_state=return_state,
                                  go_backwards=go_backwards,
                                  stateful=stateful,
                                  unroll=unroll,
                                  **kwargs)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.input_spec = [keras_layers.InputSpec(ndim=3)]
Example #14
 def build_with_input(self, inputs, *args, **kwargs):
     bd = self._batch_dims
     # self._input_spec = [tf.nest.map_structure(
     #     lambda x: tfkl.InputSpec(shape=[None] * bd + x.shape[bd:], dtype=x.dtype), inputs)]
     dummy_input = tf.nest.map_structure(
         lambda t: tf.zeros([2] * bd + t.shape[bd:], t.dtype), inputs)
     dummy_output = super().__call__(dummy_input, *args, **kwargs)
     self._output_spec = tf.nest.map_structure(
         lambda x: tfkl.InputSpec(shape=[None] * bd + x.shape[bd:],
                                  dtype=x.dtype), dummy_output)
     self._built_with_input = True
Example #15
    def build(self, input_shape):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError(
                'Channel dimension of inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=3, axes={-1: channels})

        self.make_query = layers.Dense(self.units, use_bias=False)
        self.make_key = layers.Dense(self.units)
        self.make_score = layers.Dense(1, activation='sigmoid')

        super(AdditiveSelfAttention,
              self).build([input_shape, input_shape, input_shape])
Example #16
    def build(self, input_shape):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError(
                'Channel dimension of inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=3, axes={-1: channels})

        self.make_query = layers.Dense(channels, use_bias=False)
        self.att_bias = self.add_weight('bias',
                                        shape=(1, ),
                                        initializer='zeros',
                                        dtype=self.dtype,
                                        trainable=True)

        super(MultiplicativeSelfAttention,
              self).build([input_shape, input_shape, input_shape])
Example #17
 def __init__(self, output_dim, sparse_target=True, **kwargs):
     """    
     Args:
         output_dim (int): the number of labels to tag each temporal input.
         sparse_target (bool): whether the the ground-truth label represented in one-hot.
     Input shape:
         (batch_size, sentence length, output_dim)
     Output shape:
         (batch_size, sentence length, output_dim)
     """
     super(CRF, self).__init__(**kwargs)
     self.output_dim = int(output_dim)
     self.sparse_target = sparse_target
     self.input_spec = L.InputSpec(min_ndim=3)
     self.supports_masking = False
     self.sequence_lengths = None
     self.transitions = None
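A hedged end-to-end sketch of how such a CRF tagging layer is typically attached; it assumes the class also implements call() and a matching loss, and num_tags and the feature size are illustrative:

    num_tags = 9
    inputs = L.Input(shape=(None, 128))             # per-token features
    scores = L.Dense(num_tags)(inputs)              # emission scores
    outputs = CRF(num_tags, sparse_target=True)(scores)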
Example #18
    def build(self, input_shape):
        assert len(input_shape) == 3
        f_shape = tf.TensorShape(input_shape)
        input_spec = L.InputSpec(min_ndim=3, axes={-1: f_shape[-1]})

        if f_shape[-1] is None:
            raise ValueError('The last dimension of the inputs to `CRF` '
                             'should be defined. Found `None`.')
        if f_shape[-1] != self.output_dim:
            raise ValueError('The last dimension of the input shape must be '
                             'equal to `output_dim`. Use a linear layer if needed.')
        self.input_spec = input_spec
        self.transitions = self.add_weight(name='transitions',
                                           shape=[self.output_dim, self.output_dim],
                                           initializer='glorot_uniform',
                                           trainable=True)
        self.built = True
Example #19
    def build(self, input_shape=None):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError('Channel dimension of the inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=max(2, len(input_shape)), axes={-1: channels})

        kernel_initializer = initializers.random_normal(mean=0., stddev=np.sqrt(1. / channels))
        self.carry = layers.Dense(
            channels,
            kernel_initializer=kernel_initializer,
            bias_initializer=initializers.constant(-2.),
            activation='sigmoid')
        self.transform = layers.Dense(
            channels,
            kernel_initializer=kernel_initializer,
            activation='relu')

        super().build(input_shape)
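The two Dense sublayers form a highway-style pair: carry is a sigmoid gate biased toward -2 and transform is the candidate activation. One plausible combination in call() -- an assumption, since the original call() is not shown:

        c = self.carry(inputs)                       # gate in (0, 1)
        outputs = c * inputs + (1.0 - c) * self.transform(inputs)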
Example #20
 def build(self, input_shape: KTensorShape):
     if len(input_shape) < 2:
         raise ValueError
     input_dim = input_shape[-1]
     self.mean_kernel = self.add_weight(shape=(input_dim, self.units),
                                        initializer=self.kernel_initializer,
                                        name='mean_kernel')
     self.mean_bias = self.add_weight(shape=(self.units, ),
                                      initializer=self.bias_initializer,
                                      name='mean_bias')
     self.std_kernel = self.add_weight(shape=(input_dim, self.units),
                                       initializer=self.kernel_initializer,
                                       name='std_kernel')
     self.std_bias = self.add_weight(shape=(self.units, ),
                                     initializer=self.bias_initializer,
                                     name='std_bias')
     self.input_spec = layers.InputSpec(min_ndim=2, axes={-1: input_dim})
     super().build(input_shape)
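The paired kernels indicate a head that emits both a mean and a scale. A sketch of the forward pass these weights imply (an assumption; the original call() is not shown, including whether softplus is used for positivity):

     mean = tf.matmul(inputs, self.mean_kernel) + self.mean_bias
     std = tf.nn.softplus(tf.matmul(inputs, self.std_kernel) + self.std_bias)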
Example #21
    def __init__(self,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        self.input_spec = layers.InputSpec(ndim=3)
        self.supports_masking = True

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(SelfAttentionWithContext, self).__init__(**kwargs)
Example #22
    def __init__(self, vocabulary, output_dim, normalize_unicode='NFKC', lower_case=False, zero_digits=False,
                 max_len=None, reserved_words=None, embed_type='dense_auto', adapt_cutoff=None, adapt_factor=4,
                 embeddings_initializer='uniform', **kwargs):
        super().__init__(**kwargs)
        self.input_spec = layers.InputSpec(min_ndim=1, max_ndim=2, dtype='string')

        if not isinstance(vocabulary, list) or not all(map(lambda x: isinstance(x, str), vocabulary)):
            raise ValueError('Expected "vocabulary" to be a list of strings')
        if len(vocabulary) != len(set(vocabulary)):
            raise ValueError('Expected "vocabulary" to contain unique values')
        self.vocabulary = vocabulary

        self.output_dim = output_dim
        self.normalize_unicode = normalize_unicode
        self.lower_case = lower_case
        self.zero_digits = zero_digits

        if max_len is not None and max_len < 3:
            raise ValueError('Expected "max_len" to be None or greater then 2')
        self.max_len = max_len

        if reserved_words and len(reserved_words) != len(set(reserved_words)):
            raise ValueError('Expected "reserved_words" to contain unique values')
        self.reserved_words = reserved_words

        if embed_type not in {'dense_auto', 'dense_cpu', 'adapt'}:
            raise ValueError('Expected "embed_type" to be one of "dense_auto", "dense_cpu" or "adapt"')
        self.embed_type = embed_type

        self.adapt_cutoff = adapt_cutoff
        self.adapt_factor = adapt_factor
        self.embeddings_initializer = initializers.get(embeddings_initializer)

        all_reserved_words = [] if reserved_words is None else [r for r in reserved_words if self.UNK_MARK != r]
        self._reserved_words = [self.UNK_MARK] + all_reserved_words

        miss_reserved_words = [m for m in self._reserved_words if m not in vocabulary]
        if miss_reserved_words:
            tf.get_logger().warning('Vocabulary is missing some reserved_words values: {}. '
                                    'This may indicate an error in vocabulary estimation.'.format(miss_reserved_words))

        clean_vocab = [w for w in vocabulary if w not in self._reserved_words]
        self._vocabulary = self._reserved_words + clean_vocab
Example #23
 def __init__(self, filters,
              kernel_size,
              strides=(1, 1),
              padding='valid',
              data_format='channels_last',
              dilation_rate=(1, 1),
              num_group=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(GroupConv2D, self).__init__(
         filters=filters,
         kernel_size=kernel_size,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=activation,
         use_bias=use_bias,
         kernel_initializer=kernel_initializer,
         bias_initializer=bias_initializer,
         kernel_regularizer=kernel_regularizer,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         kernel_constraint=kernel_constraint,
         bias_constraint=bias_constraint,
         **kwargs)
     self.num_group = num_group
     if self.filters % self.num_group != 0:
         raise ValueError("filters must divided by num_group with no remainders!")
     self.input_spec = layers.InputSpec(ndim=4)
Example #24
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        if self.tied_to is not None:
            self.kernel = backend.transpose(self.tied_to.kernel)
            self._non_trainable_weights.append(self.kernel)
        else:
            self.kernel = self.add_weight(shape=(input_dim, self.units),
                                          initializer=self.kernel_initializer,
                                          name='kernel',
                                          regularizer=self.kernel_regularizer,
                                          constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = layers.InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
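Design note: when tied_to is set, the transposed kernel is appended to _non_trainable_weights instead of being created with add_weight, so the tied layer adds no trainable parameters beyond its optional bias, while gradients still reach the original kernel through the transpose op.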
Example #25
    def build(self, input_shape):
        channels = input_shape[-1]
        if channels is None:
            raise ValueError(
                'Channel dimension of inputs should be defined. Found `None`.')
        self.input_spec = layers.InputSpec(ndim=3, axes={-1: channels})

        self.representation = layers.Dense(
            channels,
            kernel_initializer=self.kernel_initializer,
            bias_initializer=self.bias_initializer,
            kernel_regularizer=self.kernel_regularizer,
            bias_regularizer=self.bias_regularizer,
            kernel_constraint=self.kernel_constraint,
            bias_constraint=self.bias_constraint)
        self.importance = layers.Dense(
            channels,
            use_bias=False,
            kernel_initializer=self.kernel_initializer,
            kernel_regularizer=self.kernel_regularizer,
            kernel_constraint=self.kernel_constraint,
        )

        super(SelfAttentionWithContext, self).build(input_shape)
Example #26
 def __init__(self, padding: Tuple[int, int] = (1, 1)) -> None:
     super().__init__()
     self.padding = tuple(padding)
     self.input_spec = [layers.InputSpec(ndim=4)]
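A hedged sketch of the call() this spec implies: reflection padding on the two spatial axes of an NHWC tensor (the original call() is not shown):

 def call(self, inputs: tf.Tensor) -> tf.Tensor:
     ph, pw = self.padding
     return tf.pad(inputs, [[0, 0], [ph, ph], [pw, pw], [0, 0]],
                   mode='REFLECT')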
Example #27
    def __init__(self, units, **kwargs):
        super(AdditiveSelfAttention, self).__init__(**kwargs)
        self.input_spec = layers.InputSpec(ndim=3)

        self.units = units
Example #28
 def __init__(self, **kwargs):
     super(MultiplicativeSelfAttention, self).__init__(**kwargs)
     self.input_spec = layers.InputSpec(ndim=3)
Example #29
    def build(self, input_shape):
        # input_shape = (batch_size, seq_len)
        self.seq_len = input_shape[-1]
        # shape = (T, D, seq_len)

        self.input_spec = layers.InputSpec(min_ndim=2, axes={-1: self.seq_len})
Example #30
 def __init__(self, paddings=(1, 1), **kwargs):
     self.paddings = tuple(paddings)
     self.input_spec = [layers.InputSpec(ndim=4)]
     super(ReflectionPad2d, self).__init__(**kwargs)