Example #1
def base_model():
    model = Sequential()
    model.add(Flatten(input_shape=(28, 28)))
    model.add(Dropout(0.2))

    model.add(
        Dense(64, activation='relu', kernel_constraint=constraints.NonNeg()))
    model.add(Dropout(0.10))

    model.add(
        Dense(32, activation='relu', kernel_constraint=constraints.NonNeg()))
    model.add(Dropout(0.0))

    model.add(
        Dense(16, activation='relu', kernel_constraint=constraints.NonNeg()))
    model.add(Dropout(0.0))

    model.add(Dense(num_classes, activation='softmax',
                    kernel_constraint=constraints.NonNeg()))

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
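For reference, a minimal sketch of what constraints.NonNeg() actually does, assuming TensorFlow 2.x / tf.keras: after each optimizer step it projects negative weight entries to zero.

import tensorflow as tf

w = tf.constant([[-1.0, 2.0], [3.0, -4.0]])
print(tf.keras.constraints.NonNeg()(w).numpy())
# [[0. 2.]
#  [3. 0.]]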
Example #2
    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1], self.output_dim),
                                      initializer=self.init,
                                      trainable=True,
                                      regularizer=regularizers.l1_l2(l1=0.01,
                                                                     l2=0.01),
                                      constraint=constraints.NonNeg())

        if self.use_bias:
            self.bias = self.add_weight(shape=(1, ),
                                        initializer=self.init,
                                        name='bias',
                                        trainable=True,
                                        regularizer=regularizers.l1_l2(
                                            l1=0.01, l2=0.01),
                                        constraint=None)

            # Renormalize so the kernel entries sum to ~1. Note this runs
            # only when use_bias is True and replaces the weight variable
            # with a derived tensor.
            self.kernel = self.kernel / (K.sum(self.kernel) + 1e-6)

        else:
            self.bias = None

        super(SWAP, self).build(input_shape)  # Be sure to call this at the end
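The same kernel recipe (l1_l2 regularization plus a NonNeg constraint) also works on a stock Dense layer without a custom build; a minimal sketch, assuming tf.keras:

from tensorflow.keras import layers, regularizers, constraints

dense = layers.Dense(8,
                     kernel_regularizer=regularizers.l1_l2(l1=0.01, l2=0.01),
                     kernel_constraint=constraints.NonNeg())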
Example #3
    def compile_model(self, learning_rate):

        i_input = layers.Input(shape=(1, ), dtype='int32')
        ij_input = layers.Input(shape=(self.n_pairs, ), dtype='int32')
        self.W = layers.Embedding(
            self.n,
            self.k,
            embeddings_constraint=constraints.NonNeg(),
            embeddings_initializer=initializers.RandomUniform(minval=0,
                                                              maxval=1),
            embeddings_regularizer=regularizers.l1(1e-3))

        squeeze_layer = layers.Lambda(lambda x: backend.squeeze(x, axis=1))
        w_i = squeeze_layer(self.W(i_input))
        w_j = self.W(ij_input)

        predicted_ij = PredictedIJ(self.k, name='predicted_ij')([w_i, w_j])

        self.keras_model = models.Model(
            inputs=[i_input, ij_input],
            outputs=predicted_ij,
        )
        # Note: 'lr' is the legacy argument name; newer tf.keras Adam
        # uses 'learning_rate'.
        self.keras_model.compile(optimizers.Adam(lr=learning_rate),
                                 loss='mse',
                                 sample_weight_mode='temporal')
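The pattern worth copying here: initialize inside the feasible region (RandomUniform over [0, 1]) and let NonNeg keep the weights there. A standalone sketch of such an embedding, assuming tf.keras, with placeholder sizes:

from tensorflow.keras import layers, initializers, regularizers, constraints

emb = layers.Embedding(input_dim=1000, output_dim=16,  # placeholder sizes
                       embeddings_initializer=initializers.RandomUniform(
                           minval=0, maxval=1),
                       embeddings_regularizer=regularizers.l1(1e-3),
                       embeddings_constraint=constraints.NonNeg())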
Example #4
    def build(self, input_shape):
        """
        Re-implement for free param kappa.

        For more info, see: https://elib.dlr.de/116408/1/WACV2018.pdf
        """
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        self.kernel = self.add_weight(shape=(input_dim, self.units),
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        self.kappa = self.add_weight(
            shape=(1, ),
            initializer=initializers.Constant(value=1.),
            name="kappa",
            regularizer=regularizers.l2(1e-1),
            constraint=constraints.NonNeg())
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units, ),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
        self.built = True
Example #5
    def __init__(self,
                 a_initializer='ones',
                 k_initializer='ones',
                 n_initializer='ones',
                 z_initializer='zeros',
                 a_regularizer=None,
                 a_constraint=constraints.NonNeg(),
                 k_regularizer=None,
                 k_constraint=constraints.NonNeg(),
                 n_regularizer=None,
                 n_constraint=constraints.NonNeg(),
                 z_regularizer=None,
                 z_constraint=constraints.NonNeg(),
                 shared_axes=None,
                 a_shared=True,
                 k_shared=True,
                 n_shared=True,
                 z_shared=True,
                 z_one=False,
                 **kwargs):
        super(Hill, self).__init__(**kwargs)
        self.supports_masking = True
        # Each of a, k, n, z gets its own initializer/regularizer/constraint.
        self.a_initializer = initializers.get(a_initializer)
        self.a_regularizer = regularizers.get(a_regularizer)
        self.a_constraint = constraints.get(a_constraint)
        self.k_initializer = initializers.get(k_initializer)
        self.k_regularizer = regularizers.get(k_regularizer)
        self.k_constraint = constraints.get(k_constraint)
        self.n_initializer = initializers.get(n_initializer)
        self.n_regularizer = regularizers.get(n_regularizer)
        self.n_constraint = constraints.get(n_constraint)
        self.z_initializer = initializers.get(z_initializer)
        self.z_regularizer = regularizers.get(z_regularizer)
        self.z_constraint = constraints.get(z_constraint)
        self.a_shared = a_shared
        self.k_shared = k_shared
        self.n_shared = n_shared
        self.z_shared = z_shared
        self.z_one = z_one
        if shared_axes is None:
            self.shared_axes = None
        elif not isinstance(shared_axes, (list, tuple)):
            self.shared_axes = [shared_axes]
        else:
            self.shared_axes = list(shared_axes)
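Judging by the parameter names (a, k, n, z, all NonNeg-constrained), this layer parameterizes a Hill-type response curve. A hedged NumPy sketch of that curve; the layer's call method is not shown in this excerpt, so the exact form is an assumption:

import numpy as np

def hill(x, a=1.0, k=1.0, n=1.0, z=0.0):
    # Presumed form: y = a * x^n / (k^n + x^n) + z, all parameters >= 0.
    xn = np.power(np.maximum(x, 0.0), n)
    return a * xn / (np.power(k, n) + xn) + z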
Example #6
    def build(self, input_shape):
        self.lammbda = self.add_weight(name='lammbda',
                                       shape=(self.k, ),
                                       initializer=initializers.Constant(1 / self.k),
                                       constraint=constraints.NonNeg(),
                                       trainable=True)
        super(PredictedIJ, self).build(input_shape)  # Be sure to call this at the end
Example #7
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]

        self.input_channels = input_dim
        kernel_shape = self.kernel_size + (input_dim, self.filters)
        print("kernel shape:", kernel_shape)

        self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
        # Create a trainable weight variable for this layer.

        kernel_size = self.kernel_size
        # Idxs Init

        #mu = np.array([kernel_size[0] // 2, kernel_size[1] // 2])
        mu = np.array([0.5, 0.5])

        # Convert Types
        self.mu = mu.astype(dtype='float32')

        # Shared Parameters
        # below works for only two dimensional cov
        #self.cov = self.add_weight(shape=[input_dim*self.filters,2,2],
        #                          name="cov", initializer=cov_init, trainable=False)

        self.cov_scaler = self.add_weight(shape=(self.filters, ),
                                          name='scaler',
                                          initializer=scale_init,
                                          trainable=True,
                                          constraint=constraints.NonNeg())
        #constraint=constraints.non_neg())

        #print("Self.cov:",self.cov)
        #print("Self cov-scaler",self.cov_scaler)

        # below prepares a meshgrid.
        #self.idxs = self.add_weight(shape=[kernel_size[0]*kernel_size[1],2],
        #                           name="idxs", initializer=idx_init, trainable=False)

        self.idxs = idx_init(shape=[kernel_size[0] * kernel_size[1], 2])

        super(GaussScaler,
              self).build(input_shape)  # Be sure to call this somewhere!
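idx_init is not defined in this excerpt. A hedged reconstruction based on the shape it is called with, assuming a square kernel: a flattened meshgrid of tap coordinates.

import numpy as np

def idx_init(shape):
    # Hypothetical helper: (row, col) coordinates of every tap in a square
    # kernel, flattened to shape [n_taps, 2].
    n_taps, _ = shape
    k = int(np.sqrt(n_taps))
    rows, cols = np.meshgrid(np.arange(k), np.arange(k), indexing='ij')
    return np.stack([rows.ravel(), cols.ravel()], axis=-1).astype('float32')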
Example #8
def base_model():
    model = Sequential()

    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=x_train.shape[1:]))
    model.add(Dropout(0.2))

    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu',
                    kernel_constraint=constraints.NonNeg()))
    model.add(Dropout(0.2))
    model.add(Dense(num_classes, activation='softmax',
                    kernel_constraint=constraints.NonNeg()))

    # sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
Example #9
    def build_model(self, n_users, n_factors, l1_p):
        user = Input(shape=(1, ))
        u_emb = Embedding(n_users,
                          n_factors,
                          embeddings_constraint=constraints.NonNeg(),
                          embeddings_initializer=initializers.RandomUniform(
                              minval=0, maxval=1),
                          embeddings_regularizer=l1(l1_p))
        u = u_emb(user)
        u = Reshape((n_factors, ))(u)
        u_bias = Embedding(n_users,
                           1,
                           embeddings_constraint=constraints.NonNeg(),
                           embeddings_initializer=initializers.RandomUniform(
                               minval=0, maxval=1),
                           embeddings_regularizer=l1(l1_p))
        ub = u_bias(user)
        ub = Reshape((1, ))(ub)

        item = Input(shape=(1, ))
        # Note: 'item' reuses the user-indexed embedding tables, so item ids
        # must come from the same 0..n_users-1 range.
        m = u_emb(item)
        m = Reshape((n_factors, ))(m)
        mb = u_bias(item)
        mb = Reshape((1, ))(mb)

        x = Dot(axes=1)([u, m])
        x = Add()([x, ub, mb])

        model = Model(inputs=[user, item], outputs=x)
        opt = Adam(lr=0.001)
        model.compile(loss='mean_squared_error', optimizer=opt)

        user_embedding_model = Model(inputs=user, outputs=u)
        user_bias_model = Model(inputs=user, outputs=ub)

        return [model, user_embedding_model, user_bias_model]
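With every factor and bias NonNeg-constrained, this is a nonnegative matrix-factorization recommender. What the graph computes for one (user, item) pair, as a hedged NumPy sketch:

import numpy as np

def predict(u_vec, i_vec, u_bias, i_bias):
    # dot(user factors, item factors) plus both biases; all entries >= 0.
    return float(np.dot(u_vec, i_vec) + u_bias + i_bias)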
Example #10
        def _build_model(self):
            # -------------------- Quan block --------------------
            inp = Input(shape=(self.input_shape))
            i = inp
            global lambdaq
            lambdaq = self.lambdaq
            quan = Quan(kernel_regularizer=MyReg,
                        trainable=True,
                        kernel_constraint=constraints.NonNeg())
            i = quan(i)
            # -------------------- ResNet-20 ---------------------
            outp = self.resnet_v1(self.input_shape, self.depth)(i)

            model = Model(inputs=inp, outputs=outp)
            self.model = model
Example #11
    def build(self, input_shape):
        # Assumes `from math import sqrt, cos, pi`, `import numpy as np`,
        # and the Keras backend imported as `k`.
        conv_shape = (1, 1, 64, 64)
        self.it_weights = self.add_weight(shape=(1, 1, 64, 1),
                                          initializer=initializers.get('ones'),
                                          constraint=constraints.NonNeg(),
                                          name='ait_conv')
        kernel = np.zeros(conv_shape)
        r1 = sqrt(1.0 / 8)
        r2 = sqrt(2.0 / 8)
        for i in range(8):
            _u = 2 * i + 1
            for j in range(8):
                _v = 2 * j + 1
                index = i * 8 + j
                for u in range(8):
                    for v in range(8):
                        index2 = u * 8 + v
                        t = cos(_u * u * pi / 16) * cos(_v * v * pi / 16)
                        t = t * r1 if u == 0 else t * r2
                        t = t * r1 if v == 0 else t * r2
                        kernel[0, 0, index2, index] = t
        self.kernel = k.variable(value=kernel, dtype='float32')
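The nested loops materialize the 64 x 64 basis matrix of the 8 x 8 two-dimensional DCT: kernel[0, 0, u*8 + v, i*8 + j] = c(u) * c(v) * cos((2*i + 1) * u * pi / 16) * cos((2*j + 1) * v * pi / 16), with c(0) = sqrt(1/8) and c(w) = sqrt(2/8) otherwise. Since self.kernel is a plain backend variable rather than an add_weight parameter, only the NonNeg-constrained it_weights (a 64-to-1 channel mixer) are trained.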
Example #12
    def build(self, input_shape):
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        
        self.input_channels = input_dim
        kernel_shape = self.kernel_size + (input_dim, self.nfilters)
        print("kernel shape:",kernel_shape)

        self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True
        # Create a trainable weight variable for this layer.
        
        kernel_size = self.kernel_size
        # Idxs Init
        #mu = np.array([kernel_size[0] // 2, kernel_size[1] // 2])
        mu = np.array([0.5, 0.5])


        # Convert Types
        self.mu = mu.astype(dtype='float32')

        # Shared parameters
        # (below works only for a two-dimensional cov)
        # self.cov = self.add_weight(shape=[input_dim * self.filters, 2, 2],
        #                            name="cov", initializer=cov_init, trainable=False)

        # from functools import partial
        # sigma_initializer = partial(sigma_init, initsigma=self.initsigma)

        self.idxs = idx_init(shape=[kernel_size[0] * kernel_size[1], 2])

        self.Sigma = self.add_weight(shape=(self.nfilters, ),
                                     name='Sigma',
                                     initializer=self.sigma_initializer,
                                     trainable=self.trainSigmas,
                                     constraint=constraints.NonNeg(),
                                     regularizer=None)

        self.W = self.add_weight(shape=[kernel_size[0], kernel_size[1],
                                        self.input_channels, self.nfilters],
                                 name='Weights',
                                 initializer=self.weight_initializer,
                                 # initializer=initializers.he_uniform(),
                                 trainable=True,
                                 regularizer=self.kernel_regularizer,
                                 constraint=None)

        # self.gain = self.add_weight(shape=(self.nfilters, ),
        #                             name='Gain',
        #                             initializer=initializers.constant(1.0),
        #                             trainable=self.trainGain,
        #                             constraint=constraints.NonNeg(),
        #                             regularizer=None)
        self.kernel = None
        
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.nfilters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        
        super(Conv2DAdaptive, self).build(input_shape)  # Be sure to call this somewhere!
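Sigma is a per-filter Gaussian width kept positive by NonNeg(). The call/kernel composition is not part of this excerpt; a hedged sketch of the envelope such a layer typically builds from mu, Sigma, and the tap coordinates in self.idxs:

import numpy as np

def gauss_envelope(idxs, mu, sigma):
    # idxs: [n_taps, 2] tap coordinates; mu: center; sigma: width (> 0).
    d2 = np.sum((idxs - mu) ** 2, axis=-1)
    return np.exp(-d2 / (2.0 * sigma ** 2 + 1e-8))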
Example #13
    clear_session()
    print('clearing Keras session...')
    #input
    input_tensor = Input(shape=X_tr.shape[1:], name='input_tensor')

    ####################################
    # encoder (contracting path)
    ####################################
    #encoder block 0
    e0 = Conv2D(filters=1,
                use_bias=False,
                kernel_size=(1, 1),
                padding='same',
                name='input_filter',
                kernel_regularizer=regularizers.l1(lmb1),
                kernel_constraint=constraints.NonNeg())(input_tensor)
    e0 = Conv2D(filters=nfilters[0], kernel_size=(3, 3), padding='same')(e0)
    e0 = BatchNormalization(axis=bnorm_axis)(e0)
    e0 = Activation('relu')(e0)
    e0 = Conv2D(filters=nfilters[0], kernel_size=(3, 3), padding='same')(e0)
    e0 = BatchNormalization(axis=bnorm_axis)(e0)
    e0 = Activation('relu')(e0)

    #encoder block 1
    e1 = MaxPooling2D((2, 2))(e0)
    e1 = Conv2D(filters=nfilters[1], kernel_size=(3, 3), padding='same')(e1)
    e1 = BatchNormalization(axis=bnorm_axis)(e1)
    e1 = Activation('relu')(e1)
    e1 = Conv2D(filters=nfilters[1], kernel_size=(3, 3), padding='same')(e1)
    e1 = BatchNormalization(axis=bnorm_axis)(e1)
    e1 = Activation('relu')(e1)
Example #14
    def __init__(self,
                 units: int,
                 is_first: bool = False,
                 is_last: bool = False,
                 attention_factor: float = 1.,
                 dropout: float = 0.,
                 mean_activation: any = 'elu',
                 mean_initializer: any = 'glorot_uniform',
                 mean_regularizer: any = regularizers.l2(5e-4),
                 mean_constraint: any = None,
                 variance_activation: any = 'relu',
                 variance_initializer: any = 'glorot_uniform',
                 variance_regularizer: any = regularizers.l2(5e-4),
                 variance_constraint: any = constraints.NonNeg(),
                 last_activation: any = None,
                 **kwargs):
        """
        Create a new Gaussian graph convolution layer.

        Args:
            units: the number of units in the layer
            is_first: whether this is the first Gaussian graph convolution layer.
                      If true, the inputs are just the features and graph; if
                      false, the inputs are mean, variance, and graph
            is_last: whether this is the last Gaussian graph convolution layer.
                     If true, the output is sampled from the mean and variance;
                     if false, the outputs are the mean and variance.
            attention_factor: the attention factor ([0, 1], 1 is best)
            dropout: the dropout rate to apply to the mean and variance output
            mean_activation: the activation function for the mean outputs
            mean_initializer: the initializer for the mean weights
            mean_regularizer: the regularizer for the mean weights
            mean_constraint: the constraint mechanism for the mean weights
            variance_activation: the activation function for the variance outputs
            variance_initializer: the initializer for the variance weights
            variance_regularizer: the regularizer for the variance weights
            variance_constraint: the constraint mechanism for the variance weights
            last_activation: the activation function to apply if is_last is true

        Returns:
            None

        """
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super().__init__(**kwargs)
        self.units = units
        self.is_first = is_first
        self.is_last = is_last
        self.attention_factor = attention_factor
        self.dropout = dropout
        self.mean_activation = activations.get(mean_activation)
        self.mean_initializer = initializers.get(mean_initializer)
        self.mean_regularizer = regularizers.get(mean_regularizer)
        self.mean_constraint = constraints.get(mean_constraint)
        self.variance_activation = activations.get(variance_activation)
        self.variance_initializer = initializers.get(variance_initializer)
        self.variance_regularizer = regularizers.get(variance_regularizer)
        self.variance_constraint = constraints.get(variance_constraint)
        self.last_activation = activations.get(last_activation)
        self.supports_masking = True
        # setup model variables
        self.mean_weight = None
        self.variance_weight = None
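Per the docstring, when is_last is true the output is sampled from the predicted mean and variance; a hedged NumPy sketch of that step (the layer's actual sampling code is not shown in this excerpt). The NonNeg constraint plus the 'relu' variance activation keep the variances valid for the square root.

import numpy as np

def sample_output(mean, variance):
    # Reparameterization-style draw from N(mean, variance), elementwise.
    eps = np.random.standard_normal(mean.shape)
    return mean + np.sqrt(np.maximum(variance, 0.0)) * eps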