# Example 1 (score: 0)
def stack_discriminator_layers(init):
    """Assemble the GAN discriminator as a small MLP.

    The network maps a flattened image of ``img_dim`` features through two
    leaky-ReLU hidden layers (with dropout for regularization) to a
    2-unit sigmoid output (real vs. fake scores).

    Args:
        init: weight-initialization scheme passed to ``Sequential``.

    Returns:
        The stacked (uncompiled) ``Sequential`` model.
    """
    discriminator = Sequential(init_method=init)
    stack = [
        Dense(256, input_shape=(img_dim, )),
        Activation('leaky_relu', alpha=0.2),
        Dropout(0.25),
        Dense(128),
        Activation('leaky_relu', alpha=0.2),
        Dropout(0.25),
        Dense(2, activation='sigmoid'),
    ]
    for layer in stack:
        discriminator.add(layer)
    return discriminator
# Example 2 (score: 0)
def stack_discriminator_layers(init):
    """Assemble the GAN discriminator as a small convnet.

    Two 5x5 same-padded convolution blocks (leaky ReLU + dropout) feed a
    flattened 2-unit sigmoid head (real vs. fake scores).

    Args:
        init: weight-initialization scheme passed to ``Sequential``.

    Returns:
        The stacked (uncompiled) ``Sequential`` model.
    """
    discriminator = Sequential(init_method=init)
    stack = [
        Conv2D(64, kernel_size=(5, 5), padding='same', input_shape=img_dims),
        Activation('leaky_relu'),
        Dropout(0.25),
        Conv2D(128, kernel_size=(5, 5), padding='same'),
        Activation('leaky_relu'),
        Dropout(0.25),
        Flatten(),
        Dense(2),
        Activation('sigmoid'),
    ]
    for layer in stack:
        discriminator.add(layer)
    return discriminator
# Example 3 (score: 0)
# Load the 8x8 digits dataset and preview a few samples.
data = datasets.load_digits()
plot_digits_img_samples(data)

# Hold out a third of the samples for evaluation.
train_data, test_data, train_label, test_label = train_test_split(
    data.data, data.target, test_size=0.33, random_seed=5)

# Adam optimizer with a small momentum term.
opt = register_opt(optimizer_name='adam', momentum=0.01, learning_rate=0.001)

# Convolutional classifier: two conv stages (dropout + batch-norm after
# each) followed by a dense head ending in a 10-way softmax.
model = Sequential(init_method='he_uniform')
conv_stack = [
    Conv2D(filters=32,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(1, 8, 8),
           padding='same'),
    Dropout(0.25),
    BatchNormalization(),
    Conv2D(filters=64, kernel_size=(3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    BatchNormalization(),
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    BatchNormalization(),
    Dense(10, activation='softmax'),  # 10 digit classes
]
for layer in conv_stack:
    model.add(layer)
model.compile(loss='categorical_crossentropy', optimizer=opt)

model_epochs = 12
fit_stats = model.fit(train_data.reshape(-1, 1, 8, 8),
# Example 4 (score: 0)
                                                                  random_seed = 3)

# Preview a handful of the CIFAR training images.
plot_img_samples(train_data, train_label, dataset = 'cifar', channels = 3)

# Flatten each image into one standardized vector per sample.
reshaped_image_dims = 3 * 32 * 32 # ==> (channels * height * width)
reshaped_train_data = z_score(train_data.reshape(train_data.shape[0], reshaped_image_dims).astype('float32'))
reshaped_test_data  = z_score(test_data.reshape(test_data.shape[0], reshaped_image_dims).astype('float32'))

# Adam optimizer with a low learning rate.
opt = register_opt(optimizer_name = 'adam', momentum = 0.01, lr = 0.0001)

# MLP classifier: a 1024-unit input block, two identical 512-unit hidden
# blocks (ReLU -> dropout -> batch-norm), and a 100-way softmax head.
model = Sequential()
model.add(Dense(1024, input_shape = (3072, )))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(BatchNormalization())
for _ in range(2):
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
model.add(Dense(100))
model.add(Activation('softmax'))
model.compile(loss = 'cce', optimizer = opt)

model.summary(model_name = 'cifar-100 mlp')
# Example 5 (score: 0)
from ztlearn.dl.layers import Dropout, Dense, BatchNormalization

# Fetch MNIST and hold out a third of the (capped) dataset for evaluation.
mnist = fetch_mnist()
train_data, test_data, train_label, test_label = train_test_split(mnist.data,
                                                                  mnist.target.astype('int'),
                                                                  test_size   = 0.33,
                                                                  random_seed = 5,
                                                                  cut_off     = 2000)

# plot samples of training data
plot_tiled_img_samples(train_data[:40], train_label[:40], dataset = 'mnist')

# model definition: one 512-unit ReLU hidden block, then a 10-way head.
model = Sequential()
model.add(Dense(512, activation = 'relu', input_shape = (784,)))
model.add(Dropout(0.3))
model.add(BatchNormalization())
# FIX: the output layer previously used 'relu'; a multi-class classifier
# trained with categorical cross-entropy ('cce') needs a 'softmax' output
# so the activations form a probability distribution over the 10 classes
# (this also matches the other classification examples in this file).
model.add(Dense(10, activation = 'softmax')) # 10 digits classes
model.compile(loss = 'cce', optimizer = Adam())

model.summary()

model_epochs = 12
fit_stats = model.fit(train_data,
                      one_hot(train_label),
                      batch_size      = 128,
                      epochs          = model_epochs,
                      validation_data = (test_data, one_hot(test_label)),
                      shuffle_data    = True)

eval_stats = model.evaluate(test_data, one_hot(test_label))