Example 1
 def build(self, input_shape):
     self.input_spec = [InputSpec(ndim=3)]
     assert len(input_shape) == 3
     self.W = self.add_weight(shape=(input_shape[2], 1),
                              name='{}_W'.format(self.name),
                              initializer=self.init)
     # self.trainable_weights = [self.W]
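     # (not needed: add_weight above already registers self.W as a trainable weight)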
     super(AttentionWeightedAverage, self).build(input_shape)
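
A side note on what setting input_spec buys you: once build() assigns an InputSpec, Keras validates every subsequent call against it. A minimal sketch (assumes tf.keras; the layer is illustrative, not from any example above):

 import tensorflow as tf
 from tensorflow.keras.layers import Layer, InputSpec

 class RankChecked(Layer):
     def build(self, input_shape):
         self.input_spec = InputSpec(ndim=3)  # accept only rank-3 (batch, steps, features) inputs
         super().build(input_shape)

 layer = RankChecked()
 layer(tf.zeros((2, 5, 8)))   # passes the spec
 # layer(tf.zeros((2, 5)))    # would raise: expected ndim=3, found ndim=2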
Example 2
 def __init__(self, n_shapelets, X=None, **kwargs):
     self.n_shapelets = n_shapelets
     if X is None:
         self.initializer = "uniform"
     else:
         self.initializer = KMeansShapeletInitializer(X)
     super().__init__(**kwargs)
     self.input_spec = InputSpec(ndim=3)
Example 3
 def build(self, input_shape):
     super(ADLayer, self).build(input_shape[0])
     # must be set here also and be more specific
     self.input_spec = [
         InputSpec(dtype=tf.float32, shape=(None, input_shape[0][-1])),
         InputSpec(dtype=tf.float32,
                   shape=(None, self.context_vector_length))
     ]
     self.dendrits = self.add_weight(
         "dendrits",
         shape=[
             self.context_vector_length, self.dendrits_count,
             self.neurons_count
         ],
         trainable=True,
         initializer=tf.keras.initializers.RandomUniform(minval=-1,
                                                         maxval=1))
Example 4
 def __init__(self,
              units,
              dendrits_count,
              context_vector_length,
              use_abs_max=False,
              **kwargs) -> None:
     super(ADLayer, self).__init__(units, **kwargs)
     self.neurons_count = units
     self.dendrits_count = dendrits_count
     self.context_vector_length = context_vector_length
     self.use_abs_max = use_abs_max
     # important to override Dense layer's input spec
     self.input_spec = [
         InputSpec(dtype=tf.float32, ndim=2),
         InputSpec(dtype=tf.float32,
                   shape=(None, self.context_vector_length))
     ]
Example 5
 def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(ClusteringLayer, self).__init__(**kwargs)
     self.n_clusters = n_clusters
     self.alpha = alpha
     self.initial_weights = weights
     self.input_spec = InputSpec(ndim=2)
Example 6
    def build(self, input_shape):
        self.bert_input_shape = (None, None, 768)
        self.entity1_shape = (None, 128)
        self.entity2_shape = (None, 128)
        # one InputSpec per input: the rank-3 BERT sequence and the two
        # rank-2 entity encodings
        self.input_spec = [InputSpec(shape=self.bert_input_shape),
                           InputSpec(shape=self.entity1_shape),
                           InputSpec(shape=self.entity2_shape)]

        super(AverageConnectLayer, self).build(
            [self.bert_input_shape, self.entity1_shape, self.entity2_shape])
Example 7
    def __init__(self, output_dim, factor_dim, activation=None, **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(FMLayer, self).__init__(**kwargs)

        self.output_dim = output_dim
        self.factor_order = factor_dim
        self.activation = activations.get(activation)
        self.input_spec = InputSpec(ndim=2)
Example 8
 def build(self, input_shape):
     assert len(input_shape) == 2
     input_dim = input_shape[1]
     self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
     # the shape must be concrete: one row per cluster centre, one column per input feature
     self.clusters = self.add_weight(shape=(self.n_clusters, input_dim),
                                     initializer='glorot_uniform',
                                     name='clusters')
     if self.initial_weights is not None:
         self.set_weights(self.initial_weights)
         del self.initial_weights
     self.built = True
Example 9
    def build(self, input_shape):
        dim = input_shape[self.axis]

        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')

        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim, )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None

        self.moving_mean = self.add_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)

        self.moving_variance = self.add_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)

        self.mean_weights = self.add_weight(
            shape=(3, ),
            name='mean_weights',
            initializer=self.mean_weights_initializer,
            regularizer=self.mean_weights_regularizer,
            constraint=self.mean_weights_constraints)

        self.variance_weights = self.add_weight(
            shape=(3, ),
            name='variance_weights',
            initializer=self.variance_weights_initializer,
            regularizer=self.variance_weights_regularizer,
            constraint=self.variance_weights_constraints)
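        # (the two 3-vectors above look like Switchable-Norm-style mixing
        # weights, blending three sets of statistics, e.g. batch/instance/layer,
        # for the mean and the variance respectively)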

        self.built = True
Example 10
    def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if not input_shape.ndims:
            raise ValueError('Input has undefined rank:', input_shape)
        ndims = len(input_shape)

        # Convert axis to list and resolve negatives
        if isinstance(self.axis, int):
            self.axis = [self.axis]

        for idx, x in enumerate(self.axis):
            if x < 0:
                self.axis[idx] = ndims + x

        # Validate axes
        for x in self.axis:
            if x < 0 or x >= ndims:
                raise ValueError('Invalid axis: %d' % x)
        if len(self.axis) != len(set(self.axis)):
            raise ValueError('Duplicate axis: %s' % self.axis)

        axis_to_dim = {x: input_shape.dims[x].value for x in self.axis}
        for x in axis_to_dim:
            if axis_to_dim[x] is None:
                raise ValueError(
                    'Input has undefined `axis` dimension. Input shape: ',
                    input_shape)
        self.input_spec = InputSpec(ndim=ndims, axes=axis_to_dim)
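        # e.g. an NHWC input (None, 32, 32, 64) with axis=-1 resolves to
        # self.axis == [3] and axis_to_dim == {3: 64}, so the spec pins
        # rank 4 and a channel dimension of 64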

        if len(axis_to_dim) == 1:
            # Single axis batch norm (most common/default use-case)
            param_shape = (list(axis_to_dim.values())[0], )
        else:
            # Parameter shape is the original shape but with 1 in all non-axis dims
            param_shape = [
                axis_to_dim[i] if i in axis_to_dim else 1 for i in range(ndims)
            ]

        if self.scale:
            self.gamma = self.add_weight(name='gamma',
                                         shape=param_shape,
                                         dtype=self._param_dtype,
                                         initializer=self.gamma_initializer,
                                         trainable=self.trainable,
                                         experimental_autocast=False)
        else:
            self.gamma = None

        if self.center:
            self.beta = self.add_weight(name='beta',
                                        shape=param_shape,
                                        dtype=self._param_dtype,
                                        initializer=self.beta_initializer,
                                        trainable=self.trainable,
                                        experimental_autocast=False)
        else:
            self.beta = None
Example 11
 def __init__(self, factor=(2, 2), data_format='channels_last', interpolation='nearest', **kwargs):
     super(ResizeImage, self).__init__(**kwargs)
     self.data_format = data_format
     self.factor = conv_utils.normalize_tuple(factor, 2, 'factor')
     self.input_spec = InputSpec(ndim=4)
     if interpolation not in ['nearest', 'bilinear']:
         raise ValueError('interpolation should be one '
                          'of "nearest" or "bilinear".')
     self.interpolation = interpolation
Example 12
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        output_shape = (int(input_shape[self.axis]),)

        gamma_initializer = initializers.Ones()
        beta_initializer = initializers.Zeros()

        self.gamma = K.variable(gamma_initializer(output_shape))
        self.beta = K.variable(beta_initializer(output_shape))
        # assign to _trainable_weights; trainable_weights is a read-only
        # property in recent Keras (see the ssd_keras issue linked in Example 19)
        self._trainable_weights = [self.gamma, self.beta]
Example 13
    def build(self, input_shape):
        """Creates the layer weights.

        Args:
            input_shape (list(tuple, tuple)): [(batch_size, n_steps, n_classes), (batch_size, 1)]
        """
        assert len(input_shape) == 2
        assert len(input_shape[0]) == 3
        assert len(input_shape[1]) == 2
        n_steps = input_shape[0][1]
        n_classes = input_shape[0][2]
        assert n_steps is None or n_steps >= 2
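        # (the transition matrix below is only meaningful when there are at
        # least two timesteps, hence the n_steps check)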

        self.transition_params = self.add_weight(shape=(n_classes, n_classes),
                                                 initializer='uniform',
                                                 name='transition')
        self.input_spec = [InputSpec(dtype=K.floatx(), shape=(None, n_steps, n_classes)),
                           InputSpec(dtype='int32', shape=(None, 1))]
        self.built = True
Example 14
    def build(self, input_shape):
        # This currently only works for 4D inputs: assuming (B, H, W, C)
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (self.nb_classes, 1, 1, input_shape[-1])

        self.gamma = self.gamma_init(shape, name='{}_gamma'.format(self.name))
        self.beta = self.beta_init(shape, name='{}_beta'.format(self.name))
        # _trainable_weights, not trainable_weights (a read-only property in recent Keras)
        self._trainable_weights = [self.gamma, self.beta]

        self.built = True
Example 15
    def build(self, input_shape):
        """
        Method for creating the layer weights.

        :param input_shape: Keras tensor (future input to layer)
                            or list/tuple of Keras tensors to reference
                            for weight shape computations
        """
        assert input_shape is not None and len(input_shape) >= 2

        input_dimension = input_shape[-1]

        # Initialize expert weights (number of input features * number of units per expert * number of experts)
        self.expert_kernels = self.add_weight(
            name='expert_kernel',
            shape=(input_dimension, self.units, self.num_experts),
            initializer=self.expert_kernel_initializer,
            regularizer=self.expert_kernel_regularizer,
            constraint=self.expert_kernel_constraint,
        )

        # Initialize expert bias (number of units per expert * number of experts)
        if self.use_expert_bias:
            self.expert_bias = self.add_weight(
                name='expert_bias',
                shape=(self.units, self.num_experts),
                initializer=self.expert_bias_initializer,
                regularizer=self.expert_bias_regularizer,
                constraint=self.expert_bias_constraint,
            )

        # Initialize gate weights (number of input features * number of experts * number of tasks)
        self.gate_kernels = [
            self.add_weight(name='gate_kernel_task_{}'.format(i),
                            shape=(input_dimension, self.num_experts),
                            initializer=self.gate_kernel_initializer,
                            regularizer=self.gate_kernel_regularizer,
                            constraint=self.gate_kernel_constraint)
            for i in range(self.num_tasks)
        ]

        # Initialize gate bias (number of experts * number of tasks)
        if self.use_gate_bias:
            self.gate_bias = [
                self.add_weight(name='gate_bias_task_{}'.format(i),
                                shape=(self.num_experts, ),
                                initializer=self.gate_bias_initializer,
                                regularizer=self.gate_bias_regularizer,
                                constraint=self.gate_bias_constraint)
                for i in range(self.num_tasks)
            ]

        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dimension})

        super(MMoE, self).build(input_shape)
Example 16
    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})
        shape = (dim, )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint,
                                         name='{}_gamma'.format(self.name))
        else:
            self.gamma = None

        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint,
                                        name='{}_beta'.format(self.name))
        else:
            self.beta = None

        self.running_mean = self.add_weight(
            shape=shape,
            initializer=self.moving_mean_initializer,
            name='{}_running_mean'.format(self.name),
            trainable=False)

        self.running_variance = self.add_weight(
            shape=shape,
            initializer=self.moving_variance_initializer,
            name='{}_running_std'.format(self.name),
            trainable=False)

        self.r_max = K.variable(np.ones((1, )),
                                name='{}_r_max'.format(self.name))

        self.d_max = K.variable(np.zeros((1, )),
                                name='{}_d_max'.format(self.name))

        self.t = K.variable(np.zeros((1, )), name='{}_t'.format(self.name))
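        # r_max, d_max and t are the Batch Renormalization correction bounds
        # and step counter; they are plain variables rather than weights
        # because they follow a schedule instead of being learned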

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

        self.built = True
Example 17
    def __init__(self,
                 number_of_clusters=10,
                 initial_cluster_weights=None,
                 alpha=1.0,
                 **kwargs):
        super(DeepEmbeddedClustering, self).__init__(**kwargs)

        self.number_of_clusters = number_of_clusters
        self.initial_cluster_weights = initial_cluster_weights
        self.alpha = alpha
        self.input_spec = InputSpec(ndim=2)
Example 18
 def build(self, input_shape):
     assert len(input_shape) == 2
     input_dim = input_shape[1]
     self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
     self.clusters = self.add_weight(name="clusters",
                                     shape=(self.n_clusters, input_dim),
                                     initializer="glorot_uniform")
     if self.initial_weights is not None:
         self.set_weights(self.initial_weights)
         # the values were copied into the layer's variables by set_weights,
         # so the reference can be dropped to free memory
         del self.initial_weights
     self.built = True
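
For context, the clusters weight created by these DEC-style layers (Examples 5, 17, 18, 23 and this one) is typically consumed by a call() that computes Student's-t soft assignments. A minimal sketch of that pattern, not taken from any example here (K is the Keras backend; self.clusters and self.alpha as in the builds above):

 def call(self, inputs):
     # q[i, j]: soft assignment of embedded point i to cluster centre j,
     # using the Student's t-distribution kernel from the DEC paper
     q = 1.0 / (1.0 + K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters),
                            axis=2) / self.alpha)
     q **= (self.alpha + 1.0) / 2.0
     q = K.transpose(K.transpose(q) / K.sum(q, axis=1))  # rows sum to 1
     return q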
Example 19
    def build(self, input_shape):
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3

        self.W = self.add_weight(shape=(input_shape[2], 1),
                                 name='{}_W'.format(self.name),
                                 initializer=self.init)
        # self.trainable_weights = [self.W]
        self._trainable_weights = [
            self.W
        ]  # https://github.com/pierluigiferrari/ssd_keras/issues/322
        super(AttentionWeightedAverage, self).build(input_shape)
Example 20
    def build(self, input_shape):
        channel_axis = -1
        if input_shape[0][channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[0][channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        if input_shape[1][-1] != input_dim:
            raise ValueError('The last dimension of modulation input should be equal to input dimension.')

        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        # Set input spec.
        self.input_spec = [InputSpec(ndim=4, axes={channel_axis: input_dim}),
                           InputSpec(ndim=2)]
        self.built = True
Example 21
 def __init__(self, elementwise=False, name=None, **kwargs):
     super(AutoAugment, self).__init__(name=name, **kwargs)
     self.elementwise = elementwise
     self.transforms = [
         tf.keras.Sequential([
             image_augmentations.RandomChance(_get_transform(t1, m1), p1),
             image_augmentations.RandomChance(_get_transform(t2, m2), p2),
         ]) for (t1, p1, m1), (t2, p2, m2) in _AUTO_AUGMENT_POLICY_V0
     ]
     self._transform = image_augmentations.RandomChoice(
         self.transforms, n_transforms=1, elementwise=elementwise)
     self.input_spec = InputSpec(ndim=4, dtype=tf.uint8)
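     # the InputSpec above restricts this layer to rank-4 uint8 batches,
     # i.e. (batch, height, width, channels) integer images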
Example 22
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        shape = (int(input_shape[self.axis]), )

        self.gamma = K.variable(self.gamma_init(shape),
                                name='{}_gamma'.format(self.name))
        self.beta = K.variable(self.beta_init(shape),
                               name='{}_beta'.format(self.name))
        self._trainable_weights = [self.gamma, self.beta]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Example 23
    def __init__(self, n_clusters, weights=None, alpha=1.0, **kwargs):
        '''Defines custom layer attributes, and creates layer state
        variables that do not depend on input shapes, using `add_weight()`
        '''
        self.n_clusters = n_clusters
        self.alpha = alpha
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(DECLayer, self).__init__(**kwargs)
Example 24
 def __init__(self,
              index=None,
              coordinate_scale=1.0,
              confidence_scale=1.0,
              data_format=None,
              **kwargs):
     super(Maxima2D, self).__init__(**kwargs)
     self.data_format = normalize_data_format(data_format)
     self.input_spec = InputSpec(ndim=4)
     self.index = index
     self.coordinate_scale = coordinate_scale
     self.confidence_scale = confidence_scale
Example 25
 def __init__(
         self,
         height,
         width,
         interpolation='bilinear',
         # name='Upsample',
         **kwargs):
     self.target_height = height
     self.target_width = width
     self.interpolation = interpolation
     self.input_spec = InputSpec(ndim=4)
     super(Upsample, self).__init__(**kwargs)
Example 26
 def __init__(self, target_shape=None, data_format=None, **kwargs):
     if data_format is None:
         data_format = K.image_data_format()
     assert data_format in {'channels_last', 'channels_first'}
     self.data_format = data_format
     self.input_spec = [InputSpec(ndim=4)]
     self.target_shape = target_shape
     if self.data_format == 'channels_first':
         self.target_size = (target_shape[2], target_shape[3])
     elif self.data_format == 'channels_last':
         self.target_size = (target_shape[1], target_shape[2])
     super(BilinearUpSampling2D, self).__init__(**kwargs)
Example 27
    def build(self, input_shape):
        print('input_shape:', input_shape)
        # assert isinstance(input_shape, list) and len(input_shape) ==
        assert len(input_shape) == 2
        src_shape, src_mask_shape = input_shape
        self.input_spec = [
            InputSpec(shape=src_shape),
            InputSpec(shape=src_mask_shape)
        ]

        self.src_emb_layer = EmbeddingsK(d_model=self.d_model,
                                         vocab=self.src_vocab)
        self.src_pe = PositionalEncodingK(d_model=self.d_model,
                                          dropout_rate=self.dropout_rate)

        self.encoder_mha_list = [
            MultiHeadedAttentionK(h=self.num_heads,
                                  d_model=self.d_model,
                                  dropout=self.dropout_rate)
            for _ in range(self.num_coder_blocks)
        ]

        self.encoder_pff_list = [
            PositionwiseFeedForwardK(d_model=self.d_model, d_ff=self.d_ff)
            for _ in range(self.num_coder_blocks)
        ]

        self.encoder_slc_mha_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_mha_list
        ]

        self.encoder_slc_pff_list = [
            SublayerConnectionK(size=self.d_model, dropout=self.dropout_rate)
            for _ in self.encoder_pff_list
        ]

        self.encoder_layer_norm = LayerNormK(features=self.d_model)

        super().build(input_shape)
Example 28
    def build(self, input_shape):
        if len(input_shape) < 5:
            raise ValueError(
                'Inputs to `SeparableConv3D` should have rank 5. '
                'Received input shape:', str(input_shape))
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs to '
                             '`SeparableConv3D` '
                             'should be defined. Found `None`.')
        self.input_dim = int(input_shape[channel_axis])

        depthwise_kernel_shape = (self.kernel_size[0], self.kernel_size[1],
                                  self.kernel_size[2], 1,
                                  self.input_dim * self.depth_multiplier)
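        # depthwise stage: one k0*k1*k2 filter per input channel (times
        # depth_multiplier); the 1x1x1 pointwise stage below then mixes the
        # input_dim * depth_multiplier channels into `filters` outputs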

        self.depthwise_kernel = self.add_weight(
            shape=depthwise_kernel_shape,
            initializer=self.depthwise_initializer,
            name='depthwise_kernel',
            regularizer=self.depthwise_regularizer,
            constraint=self.depthwise_constraint)

        pointwise_kernel_shape = (1, 1, 1,
                                  self.input_dim * self.depth_multiplier,
                                  self.filters)

        self.pointwise_kernel = self.add_weight(
            shape=pointwise_kernel_shape,
            initializer=self.pointwise_initializer,
            name='pointwise_kernel',
            regularizer=self.pointwise_regularizer,
            constraint=self.pointwise_constraint)

        self.bias = None
        if self.use_pointwise_bias:
            self.pointwise_bias = self.add_weight(
                shape=(self.filters, ),
                initializer=self.pointwise_bias_initializer,
                name='bias',
                regularizer=self.pointwise_bias_regularizer,
                constraint=self.pointwise_bias_constraint)
        else:
            self.pointwise_bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=5,
                                    axes={channel_axis: self.input_dim})
        self.built = True
Example 29
 def __init__(
         self,
         filters,
         kernel_size=(3, 3),
         strides=1,
         padding='same',
         dilation_rate=(1, 1),
         kernel_initializer='glorot_uniform',
         kernel_regularizer=None,
         depthwise_initializer='glorot_uniform',
         depthwise_regularizer=None,
         depthwise_constraint=None,
         bias_regularizer=None,
         activity_regularizer=None,
         bias_initializer='zeros',
         kernel_constraint=None,
         bias_constraint=None,
         momentum=0.99,  # 0.98
          puffer=0.95,  # 0.95  # activations may not exceed xx % of the maximum value
          L_A=[1, 7],  # integer length of the activations (unsigned)
          L_W=[3, 5],  # integer length of the weights (unsigned)
         max_scale=6,
         **kwargs):
     super(SepConv2DNorm, self).__init__(**kwargs)
     self.filters = filters
     self.kernel_size = kernel_size
     self.strides = strides
     self.padding = padding
     self.data_format = 'channels_last'
     self.dilation_rate = dilation_rate
     self.depth_multiplier = 1
     self.kernel_initializer = initializers.get(kernel_initializer)
      self.bias_initializer = initializers.get(
          bias_initializer)  # see below: weight*scale is what gets regularized
     self.depthwise_initializer = initializers.get(depthwise_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.depthwise_constraint = constraints.get(depthwise_constraint)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.L_W = L_W
     self.L_A = L_A
     self.momentum = momentum
     self.max_weight = 2**(L_W[0] - 1) * puffer
     self.max_activity = 2**L_A[0] - 2**-L_A[1]
     self.max_activity_signed = 2**(L_A[0] - 1) - 2**-L_A[1]
     self.max_activity_x = self.max_activity_signed * puffer
     self.w_scale_initializer = initializers.Constant(value=1.)
     self.max_scale = max_scale
     self.input_spec = InputSpec(ndim=4)
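
As a quick check of the fixed-point bounds above, with the defaults puffer=0.95, L_A=[1, 7] and L_W=[3, 5] (integer/fractional bit counts):

 max_weight          = 2 ** (3 - 1) * 0.95  # = 4 * 0.95 = 3.8
 max_activity        = 2 ** 1 - 2 ** -7     # = 2 - 0.0078125 = 1.9921875
 max_activity_signed = 2 ** 0 - 2 ** -7     # = 1 - 0.0078125 = 0.9921875
 max_activity_x      = 0.9921875 * 0.95     # = 0.942578125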
Example 30
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')

        input_dim = input_shape[channel_axis]
        kernel_shape = (input_dim, self.kernel_size[0], self.kernel_size[1],
                        self.filters)

        base = self.kernel_size[0] * self.kernel_size[1]
        if self.H == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.H = np.float32(np.sqrt(1.5 / (nb_input + nb_output)))
            # print('Glorot H: {}'.format(self.H))

        if self.kernel_lr_multiplier == 'Glorot':
            nb_input = int(input_dim * base)
            nb_output = int(self.filters * base)
            self.kernel_lr_multiplier = np.float32(
                1. / np.sqrt(1.5 / (nb_input + nb_output)))
            # print('Glorot learning rate multiplier: {}'.format(self.lr_multiplier))

        self.kernel_constraint = Clip(-self.H, self.H)
        self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
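        # the kernel is initialised uniformly in [-H, H] and the Clip
        # constraint keeps the latent real-valued weights in that range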
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        if self.use_bias:
            self.lr_multipliers = [
                self.kernel_lr_multiplier, self.bias_lr_multiplier
            ]
            self.bias = self.add_weight(shape=(self.filters, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)

        else:
            self.lr_multipliers = [self.kernel_lr_multiplier]
            self.bias = None

        # Set input spec.
        self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
        self.built = True