Example #1
import keras.layers as kl
import keras.initializers as ki


def identity_block(input_tensor, kernel_size, filters, stage, num_block, trainable=True):
    # Residual identity block: three convolutions plus a shortcut connection.
    filter1, filter2, filter3 = filters
    conv_name_base = 'conv' + str(stage) + '_block' + str(num_block)

    # layer 1: 1x1 convolution
    x = kl.Conv2D(filter1, (1, 1), name=conv_name_base + '_1_conv',
                  padding='same',
                  kernel_initializer=ki.he_normal(),
                  bias_initializer=ki.Zeros(),
                  trainable=trainable)(input_tensor)
    x = kl.BatchNormalization(name=conv_name_base + '_1_bn')(x)
    x = kl.Activation('relu')(x)

    # layer 2: kernel_size x kernel_size convolution
    x = kl.Conv2D(filter2, (kernel_size, kernel_size), padding='same',
                  kernel_initializer=ki.he_normal(),
                  bias_initializer=ki.Zeros(),
                  name=conv_name_base + '_2_conv', trainable=trainable)(x)
    x = kl.BatchNormalization(name=conv_name_base + '_2_bn')(x)
    x = kl.Activation('relu')(x)

    # layer 3: 1x1 convolution; no activation until after the shortcut add
    x = kl.Conv2D(filter3, (1, 1), name=conv_name_base + '_3_conv',
                  padding='same',
                  kernel_initializer=ki.he_normal(),
                  bias_initializer=ki.Zeros(),
                  trainable=trainable)(x)
    x = kl.BatchNormalization(name=conv_name_base + '_3_bn')(x)
    x = kl.Add()([x, input_tensor])
    x = kl.Activation('relu')(x)
    return x
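A quick usage sketch for the block above (shapes and arguments are illustrative assumptions, not from the original): the input's channel count must equal filter3, since the shortcut Add sums the block output with input_tensor element-wise.

inputs = kl.Input(shape=(56, 56, 256))
out = identity_block(inputs, kernel_size=3, filters=(64, 64, 256),
                     stage=2, num_block=1)  # -> shape (None, 56, 56, 256)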
Example #2
    def adjust_weights_lms(self):
        self.weight_vec = np.array([])
        self.bias_vec = 0
        self.mse_arr = np.array([])
        self.mae_arr = np.array([])
        self.normalized_data = self.normalize_data()
        self.data_samples, self.targets = self.make_samples_targets()

        # A single linear neuron trained with SGD approximates the LMS rule.
        neuron_lms_seqmodel = Sequential()
        neuron_lms_seqmodel.add(
            Dense(units=1,
                  input_dim=2 * (self.delayedElements_val + 1),
                  kernel_initializer=initializers.Zeros()))
        error_optimizer = optimizers.SGD(lr=2 * self.alpha_val)
        neuron_lms_seqmodel.compile(
            loss='mse',
            optimizer=error_optimizer,
            metrics=[mean_squared_error, max_absolute_err])

        # Hold out everything beyond the requested training percentage.
        validation_splitval = round(1 - (self.trainingSampleSize_val / 100), 2)
        scores = neuron_lms_seqmodel.fit(self.data_samples,
                                         self.targets,
                                         batch_size=32,
                                         epochs=self.numIterations_val,
                                         validation_split=validation_splitval)
        self.weight_vec = neuron_lms_seqmodel.layers[0].get_weights()[0]
        self.bias_vec = neuron_lms_seqmodel.layers[0].get_weights()[1][0]
        self.mse_arr = scores.history['val_mean_squared_error']
        self.mae_arr = scores.history['val_max_absolute_err']
        self.plot_error()
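This method references a custom max_absolute_err metric (and presumably imports mean_squared_error from keras.metrics) that the snippet does not show; a plausible backend-level definition, offered as an assumption rather than the original author's code, would be:

from keras import backend as K

def max_absolute_err(y_true, y_pred):
    # Largest absolute deviation across the batch.
    return K.max(K.abs(y_true - y_pred))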
Example #3
    def build(self, input_dim):
        # 'input_dim' here is the full input shape tuple; 'seed' is assumed
        # to be defined at module scope.
        # Quadratic (second-order Volterra) interaction weights.
        self.volterra_kernel = self.add_weight(
            name='volterra_kernel',
            shape=(input_dim[-1], self.kernel_size[0]**2,
                   self.kernel_size[1]**2, self.filters),
            initializer=initializers.he_normal(seed=seed),
            trainable=True)

        # Ordinary (first-order) convolution weights.
        self.linear_kernel = self.add_weight(
            name='linear_kernel',
            shape=(self.kernel_size[0], self.kernel_size[1], input_dim[-1],
                   self.filters),
            initializer=initializers.he_normal(seed=seed),
            trainable=True)

        if self.use_bias:
            self.bias = self.add_weight(
                shape=(self.filters, ),
                initializer=initializers.Zeros(),
                name='bias',
            )
        else:
            self.bias = None

        super(QuadraticLayer, self).build(input_dim)
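For context, the same build()/add_weight() pattern in a minimal, self-contained form; ScaleLayer and its call() are hypothetical (tf.keras assumed), since QuadraticLayer's forward pass is not part of the snippet:

import tensorflow as tf
from tensorflow.keras import layers, initializers

class ScaleLayer(layers.Layer):
    def build(self, input_shape):
        # add_weight registers trainable variables; Keras invokes build()
        # lazily, the first time the layer sees an input.
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[-1],),
                                      initializer=initializers.he_normal(seed=1),
                                      trainable=True)
        self.bias = self.add_weight(name='bias',
                                    shape=(input_shape[-1],),
                                    initializer=initializers.Zeros(),
                                    trainable=True)
        super(ScaleLayer, self).build(input_shape)

    def call(self, inputs):
        return inputs * self.kernel + self.bias

out = ScaleLayer()(tf.ones((2, 3)))  # builds, then applies, the layer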
Example #4
def add_initializer(model,
                    kernel_initializer=initializers.random_normal(stddev=0.01),
                    bias_initializer=initializers.Zeros()):
    # Note: this only swaps the initializer attributes on each layer; weights
    # that already exist are untouched until they are re-created (see the
    # sketch below).
    for layer in model.layers:
        if hasattr(layer, "kernel_initializer"):
            layer.kernel_initializer = kernel_initializer
        if hasattr(layer, "bias_initializer"):
            layer.bias_initializer = bias_initializer
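A minimal sketch of that caveat (tf.keras assumed; the Dense model is hypothetical): after add_initializer runs, the existing weights must still be re-sampled by hand for the new initializers to take effect.

import tensorflow as tf
from tensorflow.keras import layers, initializers

model = tf.keras.Sequential([layers.Dense(4, input_shape=(8,))])
add_initializer(model,
                kernel_initializer=initializers.RandomNormal(stddev=0.01),
                bias_initializer=initializers.Zeros())

# Re-create every weight so the swapped-in initializers actually apply.
for layer in model.layers:
    if hasattr(layer, 'kernel'):
        layer.kernel.assign(layer.kernel_initializer(layer.kernel.shape))
    if hasattr(layer, 'bias') and layer.bias is not None:
        layer.bias.assign(layer.bias_initializer(layer.bias.shape))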
Example #5
    def __init__(self,
                 xtrain,
                 ytrain,
                 xval,
                 yval,
                 xtest,
                 ytest,
                 wi=14,
                 dr=0.4,
                 ac='relu',
                 acpar=0.1,
                 bs=2048):

        # INITIALIZE HYPERPARAMETERS ###
        self.width = wi  # Integer
        self.droprate = dr  # Float 0 <= x < 1
        self.activation = ac  # String 'relu' 'elu' 'sigmoid' etc.
        self.activation_par = acpar
        self.batchsize = bs  # Integer
        self.x_train = xtrain
        self.x_validate = xval
        self.x_test = xtest
        self.y_train = ytrain
        self.y_validate = yval
        self.y_test = ytest

        # GENERATE PATHNAME
        self.name = '{}{}{}{}{}'.format(self.activation, self.batchsize,
                                        self.droprate, self.width,
                                        self.activation_par)
        self.path = '{}{}'.format('../Data/Results/AutoEncoder/', self.name)

        # INITIALIZE CHOICE OF KERAS FUNCTIONS #
        self.model = Sequential()

        self.sgd = optimizers.SGD(lr=0.01, momentum=0.001, decay=0.001)
        self.adagrad = optimizers.Adagrad(lr=0.01, epsilon=None, decay=0.0)
        self.adam = optimizers.Adam(lr=0.001,
                                    beta_1=0.9,
                                    beta_2=0.999,
                                    epsilon=10e-8,
                                    decay=0.001,
                                    amsgrad=False)
        self.cb = callbacks.EarlyStopping(monitor='val_loss',
                                          min_delta=0.0001,
                                          patience=50,
                                          verbose=1,
                                          mode='min',
                                          baseline=None,
                                          restore_best_weights=True)
        # Note: these initializers are constructed but never assigned to any
        # layer, so as written they have no effect on the model.
        initializers.VarianceScaling(scale=1.0,
                                     mode='fan_in',
                                     distribution='normal',
                                     seed=None)
        initializers.he_normal(151119)
        initializers.Zeros()
Example #6
    def build(self, input_shape):
        self.kernel = self.add_weight(name='kernel',
                                      shape=(self.kernel_size, input_shape[-1], self.filters),
                                      initializer=initializers.random_uniform(seed=self.seed),
                                      trainable=True)

        self.bias = self.add_weight(name='bias',
                                    shape=(self.filters,),
                                    initializer=initializers.Zeros(),
                                    trainable=True)

        super(ConvLayer, self).build(input_shape)
Example #7
    def pretrain_discriminator(self, epochs):
        train_X, train_d, train_y, test_X, test_d, test_y = train_test(
            self.dir, self.n_steps_in, self.n_steps_out)
        # Despite its name, 'one' is an all-zeros target tensor produced by
        # initializers.Zeros().
        init_z = initializers.Zeros()
        one = init_z(shape=(self.batch_size, self.n_steps_out,
                            self.latent_dim))
        optimizer = Adam(hparams.dis_pretrain_learning_rate)
        # The discriminator must be compiled before train_on_batch can run.
        self.discriminator.compile(loss='binary_crossentropy',
                                   optimizer=optimizer,
                                   metrics=['accuracy'])
        for epoch in range(epochs):
            y_pre = self.discriminator.train_on_batch(train_X, one)
Example #8
    def build(self, input_shape):
        self.num_layers = input_shape[1]
        # One scalar mixing weight per layer, starting at zero, with L2
        # regularization.
        self.W = self.add_weight(shape=(self.num_layers, ),
                                 initializer=initializers.Zeros(),
                                 regularizer=regularizers.get(
                                     regularizers.l2(self.l2_coef)),
                                 name='{}_w'.format(self.name))
        if self.scale:
            self.gamma = self.add_weight(shape=(1, ),
                                         initializer=initializers.Ones(),
                                         name='{}_gamma'.format(self.name))
        super(WeightedAverage, self).build(input_shape)
Example #9
    def build(self, input_shape):
        assert len(input_shape) >= 2
        input_dim = input_shape[-1]

        f = self._make_liftering(input_dim, self.Q)
        if self.inv:
            f = 1 / f

        # The weight itself starts at zero; the computed liftering window f
        # is stashed in _initial_weights, presumably to be applied with
        # set_weights() after construction.
        self.filter = self.add_weight((input_dim, ),
                                      initializer=initializers.Zeros(),
                                      name='filter')

        self._initial_weights = [f]
        self.input_spec = InputSpec(dtype=K.floatx(),
                                    min_ndim=2,
                                    axes={-1: input_dim})
        self.built = True
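_make_liftering itself is not shown; a common cepstral liftering window, offered only as an assumption about the missing helper (the standard HTK-style formula w[n] = 1 + (Q/2)*sin(pi*n/Q)), would be:

import numpy as np

def _make_liftering(input_dim, Q):
    # One sinusoidal liftering coefficient per cepstral index.
    n = np.arange(input_dim)
    return 1.0 + (Q / 2.0) * np.sin(np.pi * n / Q)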