Example #1
File: fm.py Project: notechats/notekeras
    def build(self, input_shape):
        self.kernel = self.add_weight(
            name='kernel',
            shape=(input_shape[1], self.field_num, self.factor_dim),
            initializer=tf.random_normal_initializer(),
            regularizer=l2(self.kernel_reg),
            trainable=True)

        self.weight = self.add_weight(
            name='weight',
            shape=(input_shape[1], 1),
            initializer=tf.random_normal_initializer(),
            regularizer=l2(self.weight_reg),
            trainable=True)

        self.bias = self.add_weight(name='bias',
                                    shape=(1, ),
                                    initializer=tf.zeros_initializer(),
                                    trainable=True)
        self.activate_layer = activations.get(self.activation)
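Note: `activations.get()` is the standard Keras resolver used throughout these examples; it accepts a string name, `None`, or a callable. A quick sketch of its behavior:

import tensorflow as tf
from tensorflow.keras import activations

relu = activations.get('relu')        # string name -> activation function
linear = activations.get(None)        # None -> linear (identity) activation
gelu = activations.get(tf.nn.gelu)    # an existing callable is returned as-is
print(relu(tf.constant([-1.0, 2.0])).numpy())  # [0. 2.]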
Example #2
File: keras.py Project: DavidKWH/CommLib
    def __init__(self,
                 units,
                 activation=None,
                 kernel_initializer=None,
                 batch_normalization=False,
                 **kwargs):
        super(HyperDense, self).__init__(**kwargs)

        batch_norm = batch_normalization
        use_bias = not batch_normalization
        dense_linear_layer = partial(Dense,
                                     activation=None,
                                     use_bias=use_bias,
                                     kernel_initializer=kernel_initializer)
        batch_norm_layer = BatchNormalization

        self.dl_layer = dense_linear_layer(units)
        self.bn_layer = batch_norm_layer() if batch_norm else None
        self.activation = activations.get(activation)
        self.batch_norm = batch_norm
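Note: `HyperDense` deliberately builds the Dense layer with `activation=None` so the activation can run after batch normalization. A minimal sketch of the same Dense -> BatchNorm -> activation ordering with stock Keras layers (the call order is an assumption; `HyperDense.call()` is not shown):

import tensorflow as tf
from tensorflow.keras import activations, layers

x = tf.random.normal([8, 16])
dense = layers.Dense(32, use_bias=False)   # bias is redundant before BatchNorm
bn = layers.BatchNormalization()
act = activations.get('relu')
y = act(bn(dense(x), training=True))       # Dense -> BatchNorm -> activation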
Example #3
    def __init__(self,
                 filters,
                 kernel_size,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 spectral_normalization=True,
                 bias_constraint=None,
                 **kwargs):
        if data_format is None:
            data_format = K.image_data_format()
        super(Conv2D, self).__init__(
            filters=filters,
            kernel_size=kernel_size,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs)

        self.u = K.random_normal_variable(
            [1, filters], 0, 1, dtype=self.dtype, name="sn_estimate")  # [1, out_channels]
        self.spectral_normalization = spectral_normalization
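Note: a `[1, filters]` vector named `sn_estimate` is the running singular-vector estimate used by spectral normalization (Miyato et al., 2018). Below is a sketch of one power-iteration step that such an estimate typically drives; this is an assumption about the layer's `call()`, which is not shown:

import tensorflow as tf

def spectral_normalize(w, u, eps=1e-12):
    """One power-iteration step; returns the normalized kernel and updated u."""
    w_mat = tf.reshape(w, [-1, w.shape[-1]])                         # [in, out]
    v = tf.math.l2_normalize(tf.matmul(u, w_mat, transpose_b=True), epsilon=eps)
    u_new = tf.math.l2_normalize(tf.matmul(v, w_mat), epsilon=eps)
    sigma = tf.matmul(tf.matmul(v, w_mat), u_new, transpose_b=True)  # ~ largest singular value
    return w / sigma, u_new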
Example #4
    def __init__(self,
                 head_num,
                 name="attention",
                 activation='relu',
                 use_bias=True,
                 kernel_initializer='glorot_normal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 history_only=False,
                 **kwargs):
        """Initialize the layer.

        :param head_num: Number of heads.
        :param activation: Activations for linear mappings.
        :param use_bias: Whether to use bias term.
        :param kernel_initializer: Initializer for linear mappings.
        :param bias_initializer: Initializer for linear mappings.
        :param kernel_regularizer: Regularizer for linear mappings.
        :param bias_regularizer: Regularizer for linear mappings.
        :param kernel_constraint: Constraints for linear mappings.
        :param bias_constraint: Constraints for linear mappings.
        :param history_only: Whether to only use history in attention layer.
        """
        self.supports_masking = True
        self.head_num = head_num
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.history_only = history_only

        self.Wq, self.Wk, self.Wv, self.Wo = None, None, None, None
        self.bq, self.bk, self.bv, self.bo = None, None, None, None
        super(MultiHeadAttention, self).__init__(name=name, **kwargs)
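Note: layers that resolve the activation in `__init__` like this usually convert it back to a string in `get_config()`; the standard round trip:

from tensorflow.keras import activations

act = activations.get('relu')
name = activations.serialize(act)   # -> 'relu', safe to store in a config dict
act_again = activations.get(name)   # restores the function on deserialization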
Example #5
 def __init__(self,
              rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              spectral_normalization=True,
              **kwargs):
     super(_ConvSN, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = conv_utils.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(
         dilation_rate, rank, 'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
     self.spectral_normalization = spectral_normalization
     self.u = None
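Note: unlike example #3, which creates `u` eagerly in `__init__`, this variant sets `self.u = None` and presumably defers creation until the input shape is known. A hypothetical sketch of that deferred creation (`_ConvSN.build()` is not shown here):

import tensorflow as tf

class ConvSNSketch(tf.keras.layers.Layer):
    def __init__(self, filters, spectral_normalization=True, **kwargs):
        super().__init__(**kwargs)
        self.filters = filters
        self.spectral_normalization = spectral_normalization
        self.u = None

    def build(self, input_shape):
        if self.spectral_normalization and self.u is None:
            # running singular-vector estimate: updated during call, not trained
            self.u = self.add_weight(name='sn_estimate',
                                     shape=(1, self.filters),
                                     initializer='random_normal',
                                     trainable=False)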
Example #6
    def __init__(self,
                 filters,
                 kernel_size,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 normalize=False,
                 offset=None,
                 in_channels=None,
                 **kwargs):

        from tensorflow.keras import activations, initializers, regularizers
        self.filters = filters
        self.kernel_size = kernel_size
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.normalize = normalize

        if not (np.asarray(kernel_size) == kernel_size[0]).all():
            raise Exception("Only cubic kernel sizes are supported.")

        if offset is None:
            if kernel_size[0] % 2:
                self.offset = tf.zeros(shape=(3, ))
            else:
                self.offset = tf.fill([3], -0.5)
        else:
            self.offset = offset

        self.fixed_radius_search = FixedRadiusSearch(metric='Linf',
                                                     ignore_query_point=False,
                                                     return_distances=False)

        super().__init__(**kwargs)
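Note: the `offset` logic centers even-sized kernels, which would otherwise sit half a cell off-center; odd kernels need no shift. A quick demonstration:

import tensorflow as tf

for k in [(3, 3, 3), (4, 4, 4)]:
    offset = tf.zeros([3]) if k[0] % 2 else tf.fill([3], -0.5)
    print(k, offset.numpy())   # (3,3,3) -> [0. 0. 0.], (4,4,4) -> [-0.5 -0.5 -0.5]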
Example #7
File: core.py Project: nnnnwang/deepflow
    def __init__(self,
                 units,
                 dropout_list: list = None,
                 activation=None,
                 kernel_initializer='glorot_norm',
                 bias_initializers='zeros',
                 l2_reg_list=None,
                 use_bn=False,
                 use_gate=False,
                 **kwargs):

        self.units = list(map(int, units))
        self.dropout_list = dropout_list
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(get_initializer(kernel_initializer))
        self.bias_initializer = initializers.get(bias_initializers)
        self.l2_reg_list = l2_reg_list
        self.use_bn = use_bn
        self.use_gate = use_gate

        super(Mlp, self).__init__(**kwargs)
Example #8
  def __init__(self,
               exp_base: int,
               num_nodes: int,
               use_bias: Optional[bool] = True,
               activation: Optional[Text] = None,
               kernel_initializer: Optional[Text] = 'glorot_uniform',
               bias_initializer: Optional[Text] = 'zeros',
               **kwargs) -> None:

    if 'input_shape' not in kwargs and 'input_dim' in kwargs:
      kwargs['input_shape'] = (kwargs.pop('input_dim'),)

    super().__init__(**kwargs)

    self.exp_base = exp_base
    self.num_nodes = num_nodes
    self.nodes = []
    self.use_bias = use_bias
    self.activation = activations.get(activation)
    self.kernel_initializer = initializers.get(kernel_initializer)
    self.bias_initializer = initializers.get(bias_initializer)
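Note: the `input_dim` -> `input_shape` shim above mirrors what `keras.layers.Dense` does, so `Layer(..., input_dim=784)` behaves like `input_shape=(784,)`:

kwargs = {'input_dim': 784}
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
    kwargs['input_shape'] = (kwargs.pop('input_dim'),)
assert kwargs == {'input_shape': (784,)}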
Example #9
File: fm.py Project: notechats/notekeras
    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.factor_dim),
                                      initializer='glorot_uniform',
                                      regularizer=l2(self.kernel_reg),
                                      trainable=True)
        if self.use_weight:
            self.weight = self.add_weight(name='weight',
                                          shape=(input_shape[1],
                                                 self.output_dim),
                                          initializer='glorot_uniform',
                                          regularizer=l2(self.weight_reg),
                                          trainable=True)
        if self.use_bias:
            self.bias = self.add_weight(name='bias',
                                        shape=(self.output_dim, ),
                                        initializer='zeros',
                                        trainable=True)

        self.activate_layer = activations.get(self.activate)
        super(FactorizationMachine, self).build(input_shape)
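Note: a `(features, factor_dim)` kernel is the latent-factor matrix of a factorization machine. A sketch of the classic second-order interaction it supports (an assumption about `call()`, which is not shown):

import tensorflow as tf

def fm_pairwise(x, v):
    """x: [batch, features], v: [features, k] -> [batch, 1]."""
    square_of_sum = tf.square(tf.matmul(x, v))              # (sum_i v_i x_i)^2
    sum_of_square = tf.matmul(tf.square(x), tf.square(v))   # sum_i (v_i x_i)^2
    return 0.5 * tf.reduce_sum(square_of_sum - sum_of_square,
                               axis=1, keepdims=True)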
Example #10
 def __init__(
     self,
     k,
     channels=None,
     return_selection=False,
     activation=None,
     kernel_initializer="glorot_uniform",
     kernel_regularizer=None,
     kernel_constraint=None,
     **kwargs
 ):
     super().__init__(
         return_selection=return_selection,
         kernel_initializer=kernel_initializer,
         kernel_regularizer=kernel_regularizer,
         kernel_constraint=kernel_constraint,
         **kwargs
     )
     self.k = k
     self.channels = channels
     self.activation = activations.get(activation)
Example #11
    def __init__(self,
                 filters,
                 kernel_size,
                 kernel_initializer='glorot_uniform',
                 activation=None,
                 weights=None,
                 padding='valid',
                 strides=(1, 1),
                 data_format=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 use_bias=True,
                 **kwargs):
        if data_format is None:
            data_format = K.image_data_format()
        if padding not in {'valid', 'same', 'full'}:
            raise ValueError('Invalid border mode for CosineConvolution2D:',
                             padding)
        self.filters = filters
        self.kernel_size = kernel_size
        self.nb_row, self.nb_col = self.kernel_size
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.activation = activations.get(activation)
        self.padding = padding
        self.strides = tuple(strides)
        self.data_format = normalize_data_format(data_format)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.use_bias = use_bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(CosineConvolution2D, self).__init__(**kwargs)
Example #12
    def __init__(self,
                 units,
                 use_bias=True,
                 activation="linear",
                 gate_kernel_regularizer=None,
                 gate_activity_regularizer=None,
                 gate_bias_regularizer=None,
                 linear_kernel_regularizer=None,
                 linear_activity_regularizer=None,
                 linear_bias_regularizer=None,
                 name="gated_unit",
                 **kwargs):
        super(GatedUnit, self).__init__(name=name, **kwargs)
        self.units = units
        self.use_bias = use_bias
        self.activation = activations.get(activation)
        self.gate_kernel_regularizer = gate_kernel_regularizer
        self.gate_activity_regularizer = gate_activity_regularizer
        self.gate_bias_regularizer = gate_bias_regularizer
        self.linear_kernel_regularizer = linear_kernel_regularizer
        self.linear_activity_regularizer = linear_activity_regularizer
        self.linear_bias_regularizer = linear_bias_regularizer

        self.linear = layers.Dense(
            units,
            activation=activation,
            use_bias=use_bias,
            kernel_regularizer=linear_kernel_regularizer,
            activity_regularizer=linear_activity_regularizer,
            bias_regularizer=linear_bias_regularizer,
            name=f"{name}/linear")
        self.sigmoid = layers.Dense(
            units,
            activation="sigmoid",
            use_bias=True,
            kernel_regularizer=gate_kernel_regularizer,
            activity_regularizer=gate_activity_regularizer,
            bias_regularizer=gate_bias_regularizer,
            name=f"{name}/sigmoid")
Example #13
 def __init__(self,
              activation=None,
              use_bias=False,
              real_kernel_initializer='uniform',
              imag_kernel_initializer='zeros',
              kernel_regularizer=None,
              bias_initializer='zeros',
              seed=None,
              **kwargs):
     super(Complex_deconv, self).__init__(**kwargs)
     self.use_bias = use_bias
     self.activation = activations.get(activation)
     self.real_kernel_initializer = initializers.get(
         real_kernel_initializer)
     self.imag_kernel_initializer = initializers.get(
         imag_kernel_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_initializer = initializers.get(bias_initializer)
      if seed is None:
         self.seed = np.random.randint(1, 10e6)
     else:
         self.seed = seed
Example #14
    def __init__(
        self,
        units,
        num_relationships,
        num_bases=0,
        activation=None,
        use_bias=True,
        final_layer=False,
        **kwargs
    ):
        if "input_shape" not in kwargs and "input_dim" in kwargs:
            kwargs["input_shape"] = (kwargs.get("input_dim"),)

        super().__init__(**kwargs)

        if not isinstance(num_bases, int):
            raise TypeError("num_bases should be an int")

        if not isinstance(units, int):
            raise TypeError("units should be an int")

        if units <= 0:
            raise ValueError("units should be positive")

        if not isinstance(num_relationships, int):
            raise TypeError("num_relationships should be an int")

        if num_relationships <= 0:
            raise ValueError("num_relationships should be positive")

        self.units = units
        self.num_relationships = num_relationships
        self.num_bases = num_bases
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self._get_regularisers_from_keywords(kwargs)
        self.final_layer = final_layer
        super().__init__(**kwargs)
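Note: `num_bases > 0` enables the basis decomposition of Schlichtkrull et al. (2018), where each relation's kernel is a learned mixture of shared basis matrices; a sketch:

import tensorflow as tf

def relation_kernels(bases, coeffs):
    """bases: [B, in, out], coeffs: [R, B] -> per-relation kernels [R, in, out]."""
    return tf.einsum('rb,bio->rio', coeffs, bases)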
Example #15
    def __init__(self,
                 filters,
                 kernel_size,
                 groups,
                 strides=(1, 1),
                 padding='valid',
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super(GroupConv2D, self).__init__(
            rank=2,
            filters=filters,
            kernel_size=kernel_size,
            groups=groups,
            strides=strides,
            padding=padding.upper(),
            data_format=data_format,
            dilation_rate=dilation_rate,
            activation=activations.get(activation),
            use_bias=use_bias,
            kernel_initializer=initializers.get(kernel_initializer),
            bias_initializer=initializers.get(bias_initializer),
            kernel_regularizer=regularizers.get(kernel_regularizer),
            bias_regularizer=regularizers.get(bias_regularizer),
            activity_regularizer=regularizers.get(activity_regularizer),
            kernel_constraint=constraints.get(kernel_constraint),
            bias_constraint=constraints.get(bias_constraint),
            **kwargs)
Example #16
    def __init__(self,
                 units,
                 concat=False,
                 use_bias=True,
                 agg_method='mean',
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.units = units
        self.concat = concat
        self.use_bias = use_bias
        self.agg_method = agg_method
        self.aggregator = {'mean': tf.reduce_mean, 'sum': tf.reduce_sum,
                           'max': tf.reduce_max, 'min': tf.reduce_min}[agg_method]
        self.activation = activations.get(activation)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        if concat:
            self.output_dim = units * 2
        else:
            self.output_dim = units
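Note: `agg_method` selects the neighbor reduction and `concat` decides whether the node's own features are concatenated (doubling the output width, as the `output_dim` logic above records) or summed in. A sketch of that combine step (an assumption about `call()`, which is not shown):

import tensorflow as tf

def sage_combine(node_feats, neigh_feats, aggregator, concat):
    agg = aggregator(neigh_feats, axis=1)              # reduce over neighbors
    if concat:
        return tf.concat([node_feats, agg], axis=-1)   # output_dim = units * 2
    return node_feats + agg                            # output_dim = units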
Example #17
    def __init__(self, in_channels, out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01, use_bias=True):

        super().__init__()
        self.convs = []
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GraphConv(inc, hidden, bias=use_bias,
                              activation=get(activation))
            self.convs.append(layer)
            inc = hidden

        layer = GraphConv(inc, out_channels, bias=use_bias)
        self.convs.append(layer)
        self.dropout = layers.Dropout(dropout)
        self.compile(loss=SparseCategoricalCrossentropy(from_logits=True),
                     optimizer=Adam(lr=lr), metrics=['accuracy'])
        self.weight_decay = weight_decay
        self.metric = SparseCategoricalAccuracy()
Example #18
    def __init__(self,
                 output_dim: int,
                 decomp_size: int,
                 use_bias: Optional[bool] = True,
                 activation: Optional[Text] = None,
                 kernel_initializer: Optional[Text] = 'glorot_uniform',
                 bias_initializer: Optional[Text] = 'zeros',
                 **kwargs) -> None:

        # Allow specification of input_dim instead of input_shape,
        # for compatability with Keras layers that support this
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )

        super().__init__(**kwargs)

        self.output_dim = output_dim
        self.decomp_size = decomp_size

        self.use_bias = use_bias
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
Example #19
 def __init__(self,
              units,
              m_w,
              m_b,
              activation=None,
              kernel_initializer='glorot_uniform',
              kernel_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(CBFD, self).__init__(**kwargs)
     self.units = units
     self.mw = m_w
     self.mb = m_b
     self.activation = activations.get(activation)
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
Example #20
    def __init__(self,
                 units,
                 use_bias=True,
                 agg_method='mean',
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        kwargs.pop('concat',
                   None)  # in order to be compatible with `MeanAggregator`
        super().__init__(**kwargs)
        self.units = units
        self.use_bias = use_bias
        self.agg_method = agg_method
        self.aggregator = {
            'mean': tf.reduce_mean,
            'sum': tf.reduce_sum,
            'max': tf.reduce_max,
            'min': tf.reduce_min
        }[agg_method]
        self.activation = activations.get(activation)

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #21
 def __init__(
     self,
     channels,
     order=1,
     iterations=1,
     share_weights=False,
     gcn_activation="relu",
     dropout_rate=0.0,
     activation=None,
     use_bias=True,
     kernel_initializer="glorot_uniform",
     bias_initializer="zeros",
     kernel_regularizer=None,
     bias_regularizer=None,
     activity_regularizer=None,
     kernel_constraint=None,
     bias_constraint=None,
     **kwargs
 ):
     super().__init__(
         activation=activation,
         use_bias=use_bias,
         kernel_initializer=kernel_initializer,
         bias_initializer=bias_initializer,
         kernel_regularizer=kernel_regularizer,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         kernel_constraint=kernel_constraint,
         bias_constraint=bias_constraint,
         **kwargs
     )
     self.channels = channels
     self.iterations = iterations
     self.order = order
     self.share_weights = share_weights
     self.gcn_activation = activations.get(gcn_activation)
     self.dropout_rate = dropout_rate
Example #22
 def __init__(
     self,
     channels,
     epsilon=None,
     mlp_hidden=None,
     mlp_activation="relu",
     mlp_batchnorm=True,
     aggregate="sum",
     activation=None,
     use_bias=True,
     kernel_initializer="glorot_uniform",
     bias_initializer="zeros",
     kernel_regularizer=None,
     bias_regularizer=None,
     activity_regularizer=None,
     kernel_constraint=None,
     bias_constraint=None,
     **kwargs
 ):
     super().__init__(
         aggregate=aggregate,
         activation=activation,
         use_bias=use_bias,
         kernel_initializer=kernel_initializer,
         bias_initializer=bias_initializer,
         kernel_regularizer=kernel_regularizer,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         kernel_constraint=kernel_constraint,
         bias_constraint=bias_constraint,
         **kwargs
     )
     self.channels = channels
     self.epsilon = epsilon
     self.mlp_hidden = mlp_hidden if mlp_hidden else []
     self.mlp_activation = activations.get(mlp_activation)
     self.mlp_batchnorm = mlp_batchnorm
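Note: `epsilon` and the MLP options parameterize the GIN update of Xu et al. (2019); a sketch, with `mlp` standing in for the configured hidden layers:

def gin_update(h, neigh_sum, eps, mlp):
    # h: node features, neigh_sum: summed neighbor features
    return mlp((1.0 + eps) * h + neigh_sum)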
Example #23
 def __init__(self,
              filters,
              kernel_size,
              alpha=0.5,
              strides=1,
              padding='valid',
              data_format='channels_last',
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(OctConv1D, self).__init__(
         rank=1,
         filters=filters,
         kernel_size=kernel_size,
         alpha=alpha,
         strides=strides,
         padding=padding,
         data_format=data_format,
         dilation_rate=dilation_rate,
         activation=activations.get(activation),
         use_bias=use_bias,
         kernel_initializer=initializers.get(kernel_initializer),
         bias_initializer=initializers.get(bias_initializer),
         kernel_regularizer=regularizers.get(kernel_regularizer),
         bias_regularizer=regularizers.get(bias_regularizer),
         activity_regularizer=regularizers.get(activity_regularizer),
         kernel_constraint=constraints.get(kernel_constraint),
         bias_constraint=constraints.get(bias_constraint),
         **kwargs)
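Note: in an octave convolution (Chen et al., 2019), `alpha` splits the filters between a low- and a high-frequency branch; for example:

filters, alpha = 64, 0.5
low = int(filters * alpha)   # 32 low-frequency channels
high = filters - low         # 32 high-frequency channels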
Example #24
 def __init__(self,
              state_sync=False,
              decode=False,
              output_length=None,
              return_states=False,
              readout=False,
              readout_activation='linear',
              teacher_force=False,
              state_initializer=None,
              **kwargs):
     self.state_sync = state_sync
     self.cells = []
     if decode and output_length is None:
         raise Exception('output_length should be specified for decoder')
     self.decode = decode
     self.output_length = output_length
      if decode:
          kwargs['return_sequences'] = True
     self.return_states = return_states
     super(RecurrentModel, self).__init__(**kwargs)
     self.readout = readout
     self.readout_activation = activations.get(readout_activation)
     self.teacher_force = teacher_force
     self._optional_input_placeholders = {}
     if state_initializer:
         if type(state_initializer) in [list, tuple]:
             state_initializer = [
                 initializers.get(init)
                 if init else initializers.get('zeros')
                 for init in state_initializer
             ]
         else:
             state_initializer = initializers.get(state_initializer)
     self._state_initializer = state_initializer
Example #25
    def __init__(self,
                 units,
                 activation=None,
                 use_bias=True,
                 sigma_init=0.017,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 std_func=None,
                 sigma_func=None,
                 **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'),)

        super(NoisyDense, self).__init__(activity_regularizer=regularizers.get(activity_regularizer), **kwargs)

        self.units = int(units) if not isinstance(units, int) else units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.sigma_init = sigma_init
        self.std_func = std_func
        self.sigma_func = sigma_func
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.supports_masking = True
        self.input_spec = InputSpec(min_ndim=2)

        # pylint was complaining
        self.mu_weights = None
        self.sigma_weights = None
        self.mu_bias = None
        self.sigma_bias = None
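Note: the mu/sigma pairs implement the NoisyNet parameterization of Fortunato et al. (2018), where each effective weight is a learned mean plus learned-scale noise resampled per forward pass; a sketch:

import tensorflow as tf

def noisy_kernel(mu, sigma):
    return mu + sigma * tf.random.normal(tf.shape(mu))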
Example #26
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              init_criterion='he',
              kernel_initializer='complex',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              seed=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(ComplexDense, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.init_criterion = init_criterion
     if kernel_initializer in {'complex'}:
         self.kernel_initializer = kernel_initializer
     else:
         self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     if seed is None:
         self.seed = np.random.randint(1, 10e6)
     else:
         self.seed = seed
     self.input_spec = InputSpec(ndim=2)
     self.supports_masking = True
Example #27
 def __init__(
     self,
     channels=256,
     batch_norm=True,
     dropout=0.0,
     aggregate="sum",
     activation="prelu",
     use_bias=True,
     kernel_initializer="glorot_uniform",
     bias_initializer="zeros",
     kernel_regularizer=None,
     bias_regularizer=None,
     activity_regularizer=None,
     kernel_constraint=None,
     bias_constraint=None,
     **kwargs
 ):
     super().__init__(
         aggregate=aggregate,
         activation=None,
         use_bias=use_bias,
         kernel_initializer=kernel_initializer,
         bias_initializer=bias_initializer,
         kernel_regularizer=kernel_regularizer,
         bias_regularizer=bias_regularizer,
         activity_regularizer=activity_regularizer,
         kernel_constraint=kernel_constraint,
         bias_constraint=bias_constraint,
         **kwargs
     )
     self.channels = channels
     self.dropout_rate = dropout
     self.use_batch_norm = batch_norm
     if activation == "prelu" or "prelu" in kwargs:
         self.activation = PReLU()
     else:
         self.activation = activations.get(activation)
Example #28
    def __init__(self, units,
                 use_bias=False,
                 activation=None,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(**kwargs)
        self.units = units
        self.use_bias = use_bias

        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
Example #29
    def __init__(self,
                 channels,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.channels = channels
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False
Example #30
    def __init__(self,
                 implementation=3,
                 kernel_regularizer=None,
                 activity_regularizer=None,
                 nonneg=False,
                 activation="linear",
                 use_bias=False,
                 name="local_lasso",
                 **kwargs):
        super(LocalLasso, self).__init__(name=name, **kwargs)
        self.implementation = implementation
        self.kernel_regularizer = kernel_regularizer
        self.activity_regularizer = activity_regularizer
        self.nonneg = nonneg
        self.activation = activations.get(activation)
        self.use_bias = use_bias

        if kernel_regularizer is None and (activity_regularizer is not None):
            kernel_regularizer = tf.keras.regularizers.L1(1e-3)

        if nonneg:
            weight_constraint = tf.keras.constraints.NonNeg()
        else:
            weight_constraint = None

        self.lasso = tf.keras.layers.LocallyConnected1D(
            1,
            1,
            activation=activation,
            implementation=implementation,
            use_bias=use_bias,
            kernel_regularizer=kernel_regularizer,
            activity_regularizer=activity_regularizer,
            kernel_constraint=weight_constraint,
            name=f"{name}/lasso")