Example #1
def stack_discriminator_layers(init):
    # discriminator: flattened image of size img_dim in, 2-unit sigmoid real/fake score out
    model = Sequential(init_method=init)
    model.add(Dense(256, input_shape=(img_dim, )))
    model.add(Activation('leaky_relu', alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Dense(128))
    model.add(Activation('leaky_relu', alpha=0.2))
    model.add(Dropout(0.25))
    model.add(Dense(2, activation='sigmoid'))

    return model
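A typical way to build and compile this discriminator, as a minimal sketch: register_opt and the compile call with loss = 'cce' are taken from Example #6 below, while the 'he_uniform' initializer name is an assumption, not part of this snippet.

# hypothetical usage; assumes img_dim is defined globally, as in the snippet
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)
discriminator = stack_discriminator_layers(init = 'he_uniform')  # initializer name assumed
discriminator.compile(loss = 'cce', optimizer = opt)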
Example #2
def stack_generator_layers(init):
    # generator: latent vector of size latent_dim in, flat image of size img_dim out,
    # with a tanh output to match data scaled to [-1, 1]
    model = Sequential(init_method=init)
    model.add(Dense(128, input_shape=(latent_dim, )))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(256))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(img_dim, activation='tanh'))

    return model
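A matching construction sketch for the generator; the forward-pass API is not shown in the snippet, so only model creation and latent sampling are illustrated, and the initializer name and batch size are assumptions.

import numpy as np

generator = stack_generator_layers(init = 'he_uniform')  # initializer name assumed
noise = np.random.normal(0, 1, (16, latent_dim))         # batch of 16 latent vectors to feed the generator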
Example #3
def stack_discriminator_layers(init):
    # convolutional discriminator: expects a channels-first image of shape img_dims
    model = Sequential(init_method=init)
    model.add(
        Conv2D(64, kernel_size=(5, 5), padding='same', input_shape=img_dims))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(128, kernel_size=(5, 5), padding='same'))
    model.add(Activation('leaky_relu'))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(2))
    model.add(Activation('sigmoid'))

    return model
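A quick size check on what Flatten() hands to the final Dense(2), assuming img_dims = (1, 28, 28) (a channels-first MNIST-style input, which matches the 7 -> 14 -> 28 upsampling path in Example #4) and that 'same' padding at unit stride preserves height and width:

channels, height, width = 1, 28, 28   # assumed input shape
flat_features = 128 * height * width  # 128 feature maps out of the second Conv2D
print(flat_features)                  # 100352 inputs into the final Dense(2)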
Example #4
def stack_generator_layers(init):
    # convolutional generator: projects the latent vector to a 128-channel 7x7 map,
    # then upsamples twice (7x7 -> 14x14 -> 28x28) to a tanh-activated image
    model = Sequential(init_method=init)
    model.add(Dense(128 * 7 * 7, input_shape=(latent_dim, )))
    model.add(Activation('leaky_relu'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Reshape((128, 7, 7)))
    model.add(UpSampling2D())
    model.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Activation('leaky_relu'))
    model.add(UpSampling2D())
    model.add(Conv2D(img_channels, kernel_size=(5, 5), padding='same'))
    model.add(Activation('tanh'))

    return model
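Examples #3 and #4 pair up as the two halves of a DCGAN-style model. A minimal wiring sketch, where the globals latent_dim, img_channels, img_dims and the initializer name are assumptions, not part of the snippets:

latent_dim   = 100           # assumed latent vector size
img_channels = 1             # assumed grayscale output
img_dims     = (1, 28, 28)   # assumed channels-first input shape for the discriminator

generator     = stack_generator_layers(init = 'he_uniform')      # initializer name assumed
discriminator = stack_discriminator_layers(init = 'he_uniform')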
Example #5
    def add(self, layer):
        # chain shapes: the new layer's input is the previous layer's output
        if self.layers:
            layer.input_shape = self.layers[-1].output_shape

        # propagate the model-level weight initializer to layers that support one
        if hasattr(layer, 'weight_initializer'):
            layer.weight_initializer = self.init_method
        self.append_layer(layer)

        # if the layer declares an activation, append it as a standalone Activation layer
        if hasattr(layer, 'layer_activation') and layer.layer_activation is not None:
            self.append_layer(Activation(layer.layer_activation, input_shape=self.layers[-1].output_shape))
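This add() behavior is why Dense(2, activation='sigmoid') in Example #1 ends up as two stacked layers. A small illustration, assuming the same Sequential API ('he_uniform' is an assumed initializer name):

model = Sequential(init_method = 'he_uniform')
model.add(Dense(2, activation = 'sigmoid', input_shape = (128, )))
# model.layers is now [Dense, Activation]: the sigmoid was split out into its own layer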
Example #6
# train/test split (reconstructed: the call's opening lines were cut off in this
# snippet; the 'data' and 'labels' argument names are placeholders)
train_data, test_data, train_label, test_label = train_test_split(data,
                                                                  labels,
                                                                  test_size   = 0.3,
                                                                  random_seed = 3)

# plot samples of training data
plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3)

reshaped_image_dims = 3 * 32 * 32 # ==> (channels * height * width)
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data  = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# optimizer definition
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

model = Sequential()
model.add(Dense(1024, input_shape = (reshaped_image_dims, ))) # 3072 = 3 * 32 * 32
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))
model.compile(loss = 'cce', optimizer = opt)

model.summary(model_name = 'cifar-100 mlp')
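Training would normally follow the summary. A hedged sketch of what that call might look like; the fit signature and the one_hot helper are assumptions about this Keras-like API and do not appear in the snippet:

train_stats = model.fit(reshaped_train_data,
                        one_hot(train_label),         # assumed one-hot encoding helper
                        batch_size      = 128,
                        epochs          = 15,
                        validation_data = (reshaped_test_data, one_hot(test_label)))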