def create_synapse(): 
    memory_input = Input(shape=(SYN_SIZE,))

    pretender1_input = Input(shape=(SYN_SIZE,))
    pretender2_input = Input(shape=(LATENT_DIM,))

    gate_in = Sequential()
    gate_in.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='relu'))
    gate_in.add(Dropout(1 - 1 / NUM_SYNAPSES))
    gate_in.add(Dense(LATENT_DIM, activation='hard_sigmoid'))

    gate_out = Sequential()
    gate_out.add(Dense(SYN_SIZE, input_shape=(LATENT_DIM,), activation='relu'))
    gate_out.add(Dropout(1 - 1 / NUM_SYNAPSES))
    gate_out.add(Dense(SYN_SIZE, activation='sigmoid'))

    task = Sequential()
    task.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='relu'))
    task.add(Dense(OUT_SIZE, activation='hard_sigmoid'))

    projector1 = Sequential()
    projector1.add(Dense(SYN_SIZE, input_shape=(LATENT_DIM,), activation='tanh'))
    projector1.add(Dense(NUM_FLAGS, activation='sigmoid'))

    projector2 = Sequential()
    projector2.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='tanh'))
    projector2.add(Dense(NUM_FLAGS, activation='sigmoid'))

    memory = Model(memory_input, gate_out(gate_in(memory_input)))
    memory.compile(optimizer=SGD(), loss="mean_squared_error")

    operator = Model(memory_input, task(gate_out(gate_in(memory_input))))
    operator.compile(optimizer=SGD(), loss="binary_crossentropy")

    projector1.compile(optimizer=Adadelta(), loss="mean_absolute_error")
    projector1.trainable = False

    projector2.compile(optimizer=Adadelta(), loss="mean_absolute_error")
    projector2.trainable = False

    pretender1 = Model(pretender1_input, projector1(gate_in(pretender1_input)))
    pretender1.compile(optimizer=RMSprop(), loss="binary_crossentropy")

    pretender2 = Model(pretender2_input, projector2(gate_out(pretender2_input)))
    pretender2.compile(optimizer=RMSprop(), loss="binary_crossentropy")

    return memory, projector1, projector2, pretender1, pretender2, gate_in, gate_out, operator
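
# Note: the projectors above are compiled for standalone updates *before*
# being frozen; `trainable` is only consulted when a model is compiled, so
# they stay adjustable on their own while remaining fixed inside pretender1
# and pretender2. A minimal sketch of the same trick (names illustrative,
# not from the snippet above):
from keras.models import Model, Sequential
from keras.layers import Dense, Input

critic = Sequential([Dense(1, input_shape=(8,), activation='sigmoid')])
critic.compile(optimizer='sgd', loss='mse')   # standalone updates still work
critic.trainable = False                      # frozen in models compiled next

z = Input(shape=(8,))
combined = Model(z, critic(z))
combined.compile(optimizer='sgd', loss='mse')  # critic is fixed inside `combined`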
Example No. 2
def test_nested_sequential_trainability():
    input_dim = 20
    num_units = 10
    num_classes = 2

    inner_model = Sequential()
    inner_model.add(Dense(num_units, input_shape=(input_dim,)))

    model = Sequential()
    model.add(inner_model)
    model.add(Dense(num_classes))

    assert len(model.trainable_weights) == 4
    inner_model.trainable = False
    assert len(model.trainable_weights) == 2
    inner_model.trainable = True
    assert len(model.trainable_weights) == 4
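
# Note: trainable_weights reflects the flag immediately (as asserted above),
# but an already-compiled training function does not; `trainable` is baked
# in at compile time. A minimal sketch of the caveat, assuming standard Keras:
from keras.models import Sequential
from keras.layers import Dense

frozen = Sequential([Dense(4, input_shape=(8,))])
frozen.trainable = False

outer = Sequential([frozen, Dense(1, activation='sigmoid')])
outer.compile(optimizer='sgd', loss='binary_crossentropy')
# `frozen` stays fixed during outer.fit(...); after setting
# frozen.trainable = True, recompile `outer` for the change to take effect.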
Example No. 3
def get_model():
    # Optimizer
    adam = Adam(lr=0.0002, beta_1=0.5)

    # Generator
    generator = Sequential()
    generator.add(Dense(128*7*7, input_dim=randomDim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(Reshape((128, 7, 7)))  # assumes channels_first image_data_format (matches the (1, 28, 28) inputs below)
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
    generator.add(LeakyReLU(0.2))
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
    generator.compile(loss='binary_crossentropy', optimizer=adam)

    # Discriminator
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=(5, 5), strides=(2, 2), padding='same', input_shape=(1, 28, 28), kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Conv2D(128, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Flatten())
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam)

    # Combined network
    discriminator.trainable = False
    ganInput = Input(shape=(randomDim,))
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer=adam)

    return generator, discriminator, gan
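
# Note: the returned triple is typically driven by an alternating loop like
# this hypothetical sketch (randomDim and X_train are assumed to exist in
# the calling script, as above):
import numpy as np

def train_step(generator, discriminator, gan, X_train, randomDim, batch_size=128):
    noise = np.random.normal(0, 1, size=(batch_size, randomDim))
    real = X_train[np.random.randint(0, X_train.shape[0], batch_size)]
    fake = generator.predict(noise)

    # The discriminator sees real (label 1) and fake (label 0) samples.
    X = np.concatenate([real, fake])
    y = np.concatenate([np.ones(batch_size), np.zeros(batch_size)])
    discriminator.trainable = True
    d_loss = discriminator.train_on_batch(X, y)

    # The generator trains through the frozen discriminator, pushing
    # generated samples toward the "real" label.
    discriminator.trainable = False
    g_loss = gan.train_on_batch(noise, np.ones(batch_size))
    # NB: as elsewhere in these snippets, what actually matters is each
    # model's trainable state at compile time; the toggles mirror that pattern.
    return d_loss, g_loss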
Example No. 4
discriminator.add(
    Conv2D(512,
           kernel_size=(3, 3),
           strides=(2, 2),
           padding='same',
           name='CONV4'))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))

discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid', name='FF1'))

discriminator.load_weights('pretrain_discriminator_weights.h5', by_name=True)
discriminator.compile(loss='binary_crossentropy', optimizer=adam)
''' Combined network '''
discriminator.trainable = False
ganInput = Input(shape=(randomDim, ))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)

dLosses = []
gLosses = []


# Plot the loss from each batch
def plotLoss(epoch):
    plt.figure(figsize=(10, 8))
    plt.plot(dLosses, label='Discriminative loss')
    plt.plot(gLosses, label='Generative loss')
Example No. 5
Discriminator = Sequential()

Discriminator.add(Conv2D(filters=2, input_shape=(75, 50, 3), kernel_size=(3, 3), padding="valid", activation="relu"))
Discriminator.add(MaxPooling2D((2, 2)))
Discriminator.add(Conv2D(filters=16, kernel_size=(3, 3), padding="valid", activation="relu"))
Discriminator.add(MaxPooling2D((2, 2)))
Discriminator.add(Conv2D(filters=32, kernel_size=(3, 3), padding="valid", activation="relu"))
Discriminator.add(MaxPooling2D((2, 2)))
Discriminator.add(Conv2D(filters=64, kernel_size=(3, 3), padding="valid", activation="relu"))
Discriminator.add(Flatten())
Discriminator.add(Dense(1, activation="sigmoid"))

Discriminator.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])


Discriminator.trainable = False

GAN = Sequential()
GAN.add(Generator)
GAN.add(Discriminator)

GAN.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

Generator.summary()
Discriminator.summary()
GAN.summary()


for epoch in range(nEpochs):
    cur_outs = []
    GN = GenerateNoise(noise_dim, Batch_size)
Example No. 6
def main(mode='generate', pretty=False, resume=False):
    # define generator model
    generator = Sequential()
    generator.add(Dense(input_dim=noise_vector_dim, output_dim=1024))
    generator.add(Activation('tanh'))
    generator.add(Dense(128 * 7 * 7))
    generator.add(BatchNormalization())
    generator.add(Activation('tanh'))
    generator.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7, )))
    generator.add(UpSampling2D(size=(2, 2), dim_ordering="th"))
    generator.add(
        Convolution2D(64, 5, 5, border_mode='same', dim_ordering="th"))
    generator.add(Activation('tanh'))
    generator.add(UpSampling2D(size=(2, 2), dim_ordering="th"))
    generator.add(Convolution2D(1, 5, 5, border_mode='same',
                                dim_ordering="th"))
    generator.add(Activation('tanh'))

    # define discriminator model
    discriminator = Sequential()
    discriminator.add(
        Convolution2D(64,
                      5,
                      5,
                      border_mode='same',
                      input_shape=(1, 28, 28),
                      dim_ordering="th"))
    discriminator.add(Activation('tanh'))
    discriminator.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
    discriminator.add(
        Convolution2D(128, 5, 5, border_mode='same', dim_ordering="th"))
    discriminator.add(Activation('tanh'))
    discriminator.add(MaxPooling2D(pool_size=(2, 2), dim_ordering="th"))
    discriminator.add(Flatten())
    discriminator.add(Dense(1024))
    discriminator.add(Activation('tanh'))
    discriminator.add(Dense(1))
    discriminator.add(Activation('sigmoid'))

    # define gan model by connecting generator output to discriminator input
    gan = Sequential()
    gan.add(generator)
    discriminator.trainable = False
    gan.add(discriminator)

    training_history = {'discriminator': [], 'gan': []}

    if mode == 'train':
        # load mnist data and convert to float32 with range -1 to 1
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])

        # define optimizers
        d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
        g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)

        # compile the models
        generator.compile(loss='binary_crossentropy', optimizer="SGD")
        gan.compile(loss='binary_crossentropy', optimizer=g_optim)
        # once gan is compiled,
        # set discriminator back to trainable (we alternate between training generator and discriminator)
        discriminator.trainable = True
        discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)

        if resume:
            # load pretrained weights to continue from
            generator.load_weights('generator_final.h5')
            discriminator.load_weights('discriminator_final.h5')

        noise = np.zeros((batch_size, noise_vector_dim))

        num_batches = int(X_train.shape[0] / batch_size)
        # perform training
        for epoch in range(num_epochs):
            print("Epoch {}/{}".format(epoch + 1, num_epochs))
            for index in range(num_batches):
                noise = np.random.uniform(-1, 1,
                                          (batch_size, noise_vector_dim))
                image_batch = X_train[index * batch_size:(index + 1) *
                                      batch_size]
                generated_images = generator.predict(noise, verbose=0)
                if (index + 1) % save_interval == 0:
                    image = combine_images(generated_images)
                    image = image * 127.5 + 127.5
                    Image.fromarray(image.astype(np.uint8)).save(
                        os.path.join(
                            output_dir,
                            "image_{}_{}.png".format(epoch + 1, index + 1)))
                X = np.concatenate((image_batch, generated_images))
                y = np.concatenate((np.ones(batch_size), np.zeros(batch_size)))

                d_loss = discriminator.train_on_batch(X, y)
                noise = np.random.uniform(-1, 1,
                                          (batch_size, noise_vector_dim))
                discriminator.trainable = False
                gan_loss = gan.train_on_batch(noise, np.ones(batch_size))
                discriminator.trainable = True

                print(
                    "Trained batch {}/{}: Discriminator loss = {}, GAN loss = {}"
                    .format(index + 1, num_batches, d_loss, gan_loss))
                training_history['discriminator'].append(d_loss)
                training_history['gan'].append(gan_loss)
            # generator.save_weights(os.path.join(output_dir, 'generator_{}.h5'.format(epoch + 1)),
            #                        True)
            # discriminator.save_weights(
            #     os.path.join(output_dir, 'discriminator_{}.h5'.format(epoch + 1)), True)
            # with open(os.path.join(output_dir, 'partial_training_history_{}.pickle'.format(epoch + 1)), 'wb') as f:
            #     pickle.dump(training_history, f)
            # print("Generator & Discriminator weights, & Training history so far saved in directory = {}".format(
            #     output_dir))
        generator.save_weights(os.path.join(output_dir, 'generator_final.h5'),
                               True)
        discriminator.save_weights(
            os.path.join(output_dir, 'discriminator_final.h5'), True)
        with open(os.path.join(output_dir, 'training_history.pickle'),
                  'wb') as f:
            pickle.dump(training_history, f)

    elif mode == 'generate':
        generator.compile(loss='binary_crossentropy', optimizer="SGD")
        generator.load_weights(os.path.join(output_dir, 'generator_final.h5'))
        if pretty:
            discriminator.compile(loss='binary_crossentropy', optimizer="SGD")
            discriminator.load_weights(
                os.path.join(output_dir, 'discriminator_final.h5'))

            # plot_model(generator, to_file='generator_model.png')
            # plot_model(discriminator, to_file='discriminator_model.png')
            noise = np.zeros((batch_size * 20, noise_vector_dim))
            for i in range(batch_size * 20):
                noise[i, :] = np.random.uniform(-1, 1, noise_vector_dim)
            generated_images = generator.predict(noise, verbose=1)
            d_pred = discriminator.predict(generated_images, verbose=1)
            index = np.arange(0, batch_size * 20)
            index.resize((batch_size * 20, 1))
            pre_with_index = list(np.append(d_pred, index, axis=1))
            pre_with_index.sort(key=lambda x: x[0], reverse=True)
            pretty_images = np.zeros(
                (batch_size, 1) + (generated_images.shape[2:]),
                dtype=np.float32)
            for i in range(int(batch_size)):
                idx = int(pre_with_index[i][1])
                pretty_images[i, 0, :, :] = generated_images[idx, 0, :, :]
            image = combine_images(pretty_images)
        else:
            noise = np.zeros((batch_size, noise_vector_dim))
            for i in range(batch_size):
                noise[i, :] = np.random.uniform(-1, 1, noise_vector_dim)
            generated_images = generator.predict(noise, verbose=1)
            image = combine_images(generated_images)
        image = image * 127.5 + 127.5
        Image.fromarray(image.astype(np.uint8)).save(
            os.path.join(output_dir, "generated_image.png"))

    else:
        print("INVALID MODE SPECIFIED!!")
Example No. 7
sgd = SGD(lr=0.01, momentum=0.1)
decoder.compile(loss='binary_crossentropy', optimizer=sgd)

print("Setting up generator")
generator = Sequential()
generator.add(Dense(16, input_dim=1, activation='relu'))
generator.add(Dense(16, activation='relu'))
generator.add(Dense(1, activation='linear'))

generator.compile(loss='binary_crossentropy', optimizer=sgd)

print("Setting up combined net")
gen_dec = Sequential()
gen_dec.add(generator)
decoder.trainable = False
gen_dec.add(decoder)

'''def inverse_binary_crossentropy(y_true, y_pred):
    if theano.config.floatX == 'float64':
        epsilon = 1.0e-9
    else:
        epsilon = 1.0e-7
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1)
    return -bce

gen_dec.compile(loss=inverse_binary_crossentropy, optimizer=sgd)'''

gen_dec.compile(loss='binary_crossentropy', optimizer=sgd)
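
# Note: the commented-out negated loss above and the plain compile below
# chase the same fixed point. Assuming the decoder ends in a single sigmoid
# unit, the usual equivalent is to keep standard binary cross-entropy and
# feed all-"real" targets for generated samples (the non-saturating
# generator loss), e.g.:
import numpy as np

noise = np.random.uniform(-1, 1, size=(64, 1)).astype('float32')
# Pushing decoder(generator(noise)) toward label 1 moves the generator in
# the same direction the negated loss would, with better-behaved gradients.
gen_dec.train_on_batch(noise, np.ones((64, 1), dtype='float32'))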
Example No. 8
y_train = [1 for i in range(X_train.shape[0])]

descriptive_model = Sequential()
generative_model = Sequential()

descriptive_model.add(Dense(input_dim=784, output_dim=250))
descriptive_model.add(Activation('sigmoid'))
descriptive_model.add(Dense(1))
descriptive_model.add(Activation('sigmoid'))

generative_model.add(Dense(input_dim=3000, output_dim=1500))
generative_model.add(Activation('relu'))
generative_model.add(Dense(784))
generative_model.add(Activation('sigmoid'))

descriptive_model.trainable = False
generative_model.add(descriptive_model)
generative_model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True), metrics=['accuracy'])

descriptive_model.trainable = True
descriptive_model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True), metrics=['accuracy'])

batch_size = 32
fig = plt.figure()
fixed_noise = np.random.rand(1, dim).astype('float32')

progbar = generic_utils.Progbar(50)

def run(uh, nb_epoch, id, turnaround=False):
    for e in range(nb_epoch):
        acc0 = 0
Example No. 9
# In[ ]:

plot_history(history_num)

# Can be as high as 77%

# ## Merge the two models

# In[ ]:

# one option is to freeze the first two models
# model_cat.trainable = False
# model_num.trainable = False
# but we will not
model_cat.trainable = True
model_num.trainable = True

# In[ ]:

merge = concatenate([model_num.output, model_cat.output])

# d1 =  Dense(8, activation='relu')(merge)
# # d1n = BatchNormalization()(d1)
# d2 =  Dense(16, activation='relu')(d1)
# # d2n = BatchNormalization()(d2)
# d3 =  Dense(8, activation='relu')(d2)
# # d3n = BatchNormalization()(d2)
# d4 =  Dense(4, activation='relu')(d3)

output = Dense(1, activation='sigmoid')(merge)
Example No. 10
dis_model.add(Flatten())

dis_model.add(Dense(1, activation='sigmoid'))

# visualization of discriminator model
dis_model.summary()

# compiling the model
dis_model.compile(Adam(lr=0.0003, beta_1=0.5),
                  loss='binary_crossentropy',
                  metrics=['binary_accuracy'])

# combine gen_model and dis_model into a GAN

# dis_gen = discriminator(generator(z))
dis_model.trainable = False

input = Input(shape=(dim, ))
img = gen_model(input)
output = dis_model(img)
dis_gen = Model(inputs=input, outputs=output)

# compile the combined model
dis_gen.compile(Adam(lr=0.0004, beta_1=0.5),
                loss='binary_crossentropy',
                metrics=['binary_accuracy'])

# visualization of the combined model
dis_gen.summary()

###################### train full model ##################
Example No. 11
# The first argument is the number of neurons, i.e., the output dimension
g.add(layers.LeakyReLU(
    alpha=0.3))  # apply an activation to the previous output; it could also be passed to Dense via its activation argument
g.add(Dense(784, activation='sigmoid'))  # generate a 28*28 image

# Discriminator
d = Sequential()  # initialize the network
d.add(Dense(256, input_dim=784))  # input is a 784-dimensional (28*28) image; 256 neurons
d.add(layers.LeakyReLU(alpha=0.3))

d.add(Dense(1, activation='sigmoid'))  # output a scalar between 0 and 1
# define the loss function, optimizer, etc. for training
d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# GAN
d.trainable = False  # freeze the discriminator so its weights are not updated while the generator trains
inputs = Input(shape=(z_dim, ))

# the hidden layer is the generator
hidden = g(inputs)
# the output layer is the discriminator
output = d(hidden)
gan = Model(inputs, output)  # a Model is instantiated from an input tensor and an output tensor
gan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])


# Training
def train(epochs=1, plt_frq=1, BATCH_SIZE=256):
    # number of batches per epoch
    batchCount = int(X_train.shape[0] / BATCH_SIZE)
    print('Epochs:', epochs)
Example No. 12
# Deconv 6
model.add(Conv2DTranspose(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv6'))

# Final layer - three output channels, so 3 filters
model.add(Conv2DTranspose(3, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Final'))

### End of network ###


# Using a generator to help the model use less data
# Channel shifts help with shadows slightly
datagen = ImageDataGenerator(channel_shift_range=0.2)
datagen.fit(X_train)

# Compiling and training the model
model.compile(optimizer='Adam', loss='mean_squared_error')
model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                    steps_per_epoch=len(X_train) / batch_size,
                    epochs=epochs, verbose=1, validation_data=(X_val, y_val))

# Freeze layers since training is done
model.trainable = False
model.compile(optimizer='Adam', loss='mean_squared_error')

# Save model architecture and weights
model.save('Model.h5')

# Show summary of model
#model.summary()

Example No. 13
discriminator.add(
    Conv2D(128, kernel_size=(3, 3), strides=(2, 2), padding='same'))
discriminator.add(LeakyReLU(leaky_alpha))
discriminator.add(Dropout(dropout))

discriminator.add(Flatten())
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizers.Adam(lr=5e-4, beta_1=0.5))

print(discriminator.summary())

# ### GAN

# Combined network
discriminator.trainable = False
ganInput = Input(shape=(noise_size, ))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy',
            optimizer=optimizers.Adam(lr=1e-4, beta_1=0.5))

# ## Learning Phase

epochs = 50
batchSize = 128
g_loss_hist = []
d_loss_hist = []
soft_up = 0.9
soft_down = 0.1
Example No. 14
	
	generator.add(TimeDistributed(Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', activation='sigmoid')))
	print(generator.output_shape)

	print("Building Encoder...")
	encoder = Sequential()
	print((None, num_samples))
	encoder.add(Embedding(num_samples, PARAM_SIZE, input_length=1, embeddings_initializer=RandomNormal(stddev=1e-4)))
	encoder.add(Flatten(data_format='channels_last'))
	print(encoder.output_shape)

print("Building GAN...")
d_optimizer = Adam(lr=LR_D, beta_1=BETA_1, epsilon=EPSILON)
g_optimizer = Adam(lr=LR_G, beta_1=BETA_1, epsilon=EPSILON)

discriminator.trainable = True
generator.trainable = False
encoder.trainable = False
d_in_real = Input(shape=y_shape[1:])
d_in_fake = Input(shape=x_shape[1:])
d_fake = generator(encoder(d_in_fake))
d_out_real = discriminator(d_in_real)
d_out_real = Activation('linear', name='d_out_real')(d_out_real)
d_out_fake = discriminator(d_fake)
d_out_fake = Activation('linear', name='d_out_fake')(d_out_fake)
dis_model = Model(inputs=[d_in_real, d_in_fake], outputs=[d_out_real, d_out_fake])
dis_model.compile(
	optimizer=d_optimizer,
	loss={'d_out_real':'binary_crossentropy', 'd_out_fake':'binary_crossentropy'},
	loss_weights={'d_out_real':1.0, 'd_out_fake':1.0})
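
# Note: a hedged sketch of the usual generator-side counterpart (not
# necessarily this script's actual continuation): flip the frozen roles
# and train the stack toward "real" labels.
discriminator.trainable = False
generator.trainable = True
encoder.trainable = True
g_in = Input(shape=x_shape[1:])
g_out = Activation('linear', name='g_out')(discriminator(generator(encoder(g_in))))
gen_model = Model(inputs=g_in, outputs=g_out)
gen_model.compile(optimizer=g_optimizer, loss={'g_out': 'binary_crossentropy'})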
Example No. 15
# Example randomly generated images before training
plt.figure(figsize=(10, 10))
for i in range(0, 9):
    plt.subplot(3, 3, i+1)
    x = initial_generated_images[i].reshape((28, 28))
    plt.imshow(x, cmap='gray')
plt.savefig(os.path.join('figures', 'random_z_vectors.png'))

# Combine real and fake data to pretrain the classifier
X = np.concatenate((X_real, initial_generated_images))
n = X_real.shape[0]
y = np.zeros(int(2*n))
y[n:] = 1
y = y.astype(int)

discriminator.trainable = True
discriminator.fit(X, y, nb_epoch=1, batch_size=300)

# Determine the accuracy of the model
y_hat = discriminator.predict_on_batch(X)
accuracy = (2*n - np.sum(np.abs(y - y_hat.round().transpose()))) / (2*n)
print('Accuracy = {}'.format(accuracy))

# Train generative adversarial net for 2,000 epochs with 300 batch size
# once the model trains with 2000 epochs, lower the learning rate
gen_losses = {"d":[], "g":[], "f":[]}
train_for_n(z_input=z_input_vector, generator_model=generator,
            discriminator_model=discriminator, gan_model=gan,
            z_training_figures=z_training_figures,
            z_group=z_group_matrix,
            z_plot_freq=z_plot_freq, visualize_train=True,
Example No. 16
def init_models_2():

    adam = Adam(lr=learning_rate, beta_1=0.5)

    generator = Sequential()

    generator.add(
        Dense(256,
              input_dim=gen_in_dim,
              kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))

    generator.add(Dense(512))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))

    generator.add(Dense(1024))
    generator.add(LeakyReLU(alpha=0.2))
    # generator.add(Dropout(gen_dropout))

    # generator.add(Dense(2048))
    # generator.add(LeakyReLU(alpha=0.2))

    generator.add(Dense(2560))
    generator.add(LeakyReLU(alpha=0.2))

    generator.add(Dense(output_dim, activation='tanh'))
    generator.compile(optimizer=adam, loss='binary_crossentropy')

    discriminator = Sequential()

    discriminator.add(
        Dense(2560,
              input_dim=output_dim,
              kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    discriminator.add(Dense(1024))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    discriminator.add(Dense(512))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    # discriminator.add(MinibatchDiscrimination(5, 3))

    discriminator.add(Dense(256))
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dropout(disc_dropout))

    # binary classification (real = 1, fake = 0)
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(optimizer=adam, loss='binary_crossentropy')

    # creating gan
    discriminator.trainable = False
    ganInput = Input(shape=(gen_in_dim, ))
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer=adam)

    return generator, discriminator, gan
Example No. 17
def gen_GAN(generator: KM.Sequential, discriminator: KM.Sequential):
    model = KM.Sequential()
    model.add(generator)
    discriminator.trainable = False
    model.add(discriminator)
    return model
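
# Note: the returned stack still needs its own compile; a hypothetical usage
# sketch (build_generator/build_discriminator are assumed factories that
# return compiled Sequential models):
generator = build_generator()
discriminator = build_discriminator()
gan = gen_GAN(generator, discriminator)
# Compile after stacking so the frozen discriminator state is baked in.
gan.compile(loss='binary_crossentropy', optimizer='adam')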
Example No. 18
# Labels used to train the discriminator: first half real, second half generated
train_labels = np.zeros(2 * batch_size, dtype=int)
train_labels[0:batch_size] = 1  # real images

for epoch in range(number_of_epochs):
    print("\rEpoch:", epoch + 1, "of", number_of_epochs, end='')
    for _ in range(batch_size):
        input_noise = np.random.normal(loc=mu,
                                       scale=sigma,
                                       size=[batch_size, input_dim])
        generated_images = generator_model.predict(input_noise)
        np.random.shuffle(train_indices)
        image_batch = train_data[train_indices[0:batch_size]]
        train_images = np.concatenate((image_batch, generated_images))
        #Training the discriminator
        discriminator_model.trainable = True
        discriminator_model.train_on_batch(train_images, train_labels)
        #Training the gan
        discriminator_model.trainable = False
        train_noise = np.random.normal(loc=mu,
                                       scale=sigma,
                                       size=[batch_size, input_dim])
        gan.train_on_batch(train_noise, y)  # y: presumably all-"real" (ones) labels, defined elsewhere in the script

    # To visualize the training progress, we employ the following code.
    if epoch % 100 == 0:
        n_examples = 10
        scale_image = 1 * n_examples
        noise = np.random.normal(loc=mu,
                                 scale=sigma,
                                 size=(n_examples, input_dim))
Example No. 19
def evaluate(generating_train_percentage, nb_epoch, dim, wanted_digit, save_model=False):
    (X_train, y_train), _ = mnist.load_data()

    X_train = numpy.reshape(X_train, (X_train.shape[0], numpy.multiply(X_train.shape[1], X_train.shape[2])))
    X_train = X_train.astype('float32')
    X_train /= float(255)
    wanted_digits = []
    for i in range(X_train.shape[0]):
        if y_train[i] == wanted_digit:
            wanted_digits.append(X_train[i])
    wanted_digits = numpy.array(wanted_digits)

    desc = Sequential()
    gen = Sequential()

    desc.add(Dense(input_dim=784, output_dim=250))
    desc.add(Activation('sigmoid'))
    desc.add(Dense(1))
    desc.add(Activation('sigmoid'))

    gen.add(Dense(input_dim=3000, output_dim=1500))
    gen.add(Activation('relu'))
    gen.add(Dense(784))
    gen.add(Activation('sigmoid'))

    desc.trainable = False
    gen.add(desc)
    gen.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True),
                metrics=['accuracy'])

    desc.trainable = True
    desc.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True),
                 metrics=['accuracy'])

    batch_size = 32
    fig = plt.figure()
    fixed_noise = numpy.random.rand(1, dim).astype('float32')
    generating_train_percentage = int(1 / generating_train_percentage)
    if not os.path.exists(str(wanted_digit) + "/"):
        os.makedirs(str(wanted_digit))
    for iter in range(nb_epoch):
        gen_acc = 0
        desc_acc = 0
        gen_count = 0
        desc_count = 0
        for (first, last) in zip(range(0, wanted_digits.shape[0] - batch_size, batch_size),
                                 range(batch_size, wanted_digits.shape[0], batch_size)):
            noise_batch = numpy.random.rand(batch_size, dim).astype('float32')
            fake_samples = passThroughGenerativeModel(noise_batch, gen)
            true_n_fake = numpy.concatenate([wanted_digits[first: last],
                                             fake_samples], axis=0)
            y_batch = numpy.concatenate([numpy.ones((batch_size, 1)),
                                         numpy.zeros((batch_size, 1))], axis=0).astype('float32')
            all_fake = numpy.ones((batch_size, 1)).astype('float32')
            if iter % generating_train_percentage == 0 and iter != 0:
                gen_acc += gen.train_on_batch(noise_batch, all_fake)[1]
                gen_count += 1
            else:
                desc_acc += desc.train_on_batch(true_n_fake, y_batch)[1]
                desc_count += 1
        if gen_count != 0:
            gen_acc /= float(gen_count)
            print("Generative accuracy %s" % gen_acc)
        if desc_count != 0:
            desc_acc /= float(desc_count)
            print("Descriptive accuracy %s" % desc_acc)

        fixed_fake = passThroughGenerativeModel(fixed_noise, gen)
        fixed_fake *= 255
        plt.clf()
        plt.imshow(fixed_fake.reshape((28, 28)), cmap='gray')
        plt.axis('off')
        fig.canvas.draw()
        plt.savefig(str(wanted_digit) + "/Iter " + str(iter) + '.png')
        if desc_count != 0 and desc_acc <= 0.5:
            break
    if save_model:
        gen.save_weights(str(wanted_digit) + "/genModel.weights")
        open(str(wanted_digit) + "/genModel.structure", "w").write(gen.to_json())
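
# Note: a hypothetical invocation of the function above - train on MNIST
# images of the digit 3, letting the generator train on every 4th epoch
# (dim must match the generator's input_dim of 3000):
evaluate(generating_train_percentage=0.25, nb_epoch=50, dim=3000,
         wanted_digit=3, save_model=True)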
Example No. 20
sgd = SGD(lr=0.01, momentum=0.1)
decoder.compile(loss='binary_crossentropy', optimizer=sgd)

print "Setting up generator"
generator = Sequential()
generator.add(Dense(2048*2, input_dim=2048, activation='relu'))
generator.add(Dense(1024*8, activation='relu'))
generator.add(Dense(32768, activation='linear'))

generator.compile(loss='binary_crossentropy', optimizer=sgd)

print "Setting up combined net"
gen_dec = Sequential()
gen_dec.add(generator)
decoder.trainable=False
gen_dec.add(decoder)

#def inverse_binary_crossentropy(y_true, y_pred):
#    if theano.config.floatX == 'float64':
#        epsilon = 1.0e-9
#    else:
#        epsilon = 1.0e-7
#    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
#    bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1)
#    return -bce
#
#gen_dec.compile(loss=inverse_binary_crossentropy, optimizer=sgd)

gen_dec.compile(loss='binary_crossentropy', optimizer=sgd)
 
Example No. 21
generator = Sequential()
generator.add(Dense(mid_dim // 4, input_dim=sample_dim, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(mid_dim // 2, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(mid_dim, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(data_dim, activation='sigmoid'))
# generate fake sample
sample_fake = K.function([generator.input, K.learning_phase()], generator.output)



discriminator.trainable = False
generator.add(Dropout(dropout_rate))
generator.add(discriminator)


opt_g = Adam(lr=.0001)
generator.compile(loss='binary_crossentropy', optimizer=opt_g)

opt_d = Adam(lr=.002)  # the discriminator's learning rate should be higher
discriminator.trainable = True
discriminator.compile(loss='binary_crossentropy', optimizer=opt_d)


u_dist = numpy.random.uniform(-1, 1, (1000, sample_dim)).astype('float32')
gn_dist = sample_fake([u_dist, 0])
Example No. 22
g.add(Dense(512, activation=LeakyReLU(alpha=0.2)))
g.add(Dense(1024, activation=LeakyReLU(alpha=0.2)))
g.add(Dense(784, activation='sigmoid'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

d = Sequential()
d.add(Dense(1024, input_dim=28 * 28, activation=LeakyReLU(alpha=0.2)))
d.add(Dropout(0.3))
d.add(Dense(512, activation=LeakyReLU(alpha=0.2)))
d.add(Dropout(0.3))
d.add(Dense(256, activation=LeakyReLU(0.3)))
d.add(Dropout(0.3))
d.add(Dense(1, activation='sigmoid'))
d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

d.trainable = False
inputs = Input((z_dim, ))
hidden = g(inputs)
output = d(hidden)
gan = Model(inputs, output)
gan.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])


def plot_loss(losses):
    d_loss = [v[0] for v in losses['D']]
    g_loss = [v[0] for v in losses['G']]

    plt.figure(figsize=(10, 8))
    plt.plot(d_loss, label='Discriminator loss')
    plt.plot(g_loss, label='Generator loss')
    plt.xlabel('Epochs')
Example No. 23
sampler.add(lrelu())
sampler.add(Dense(dim))
sampler.add(lrelu())
sampler.add(Dense(mnist_dim))
sampler.add(Activation('sigmoid'))

# This is G itself!!!
sample_fake = theano.function([sampler.get_input()], sampler.get_output())

# We add the detector G on top, but it won't be adapted with this cost function.
# But here is a dirty hack: Theano shared variables on the GPU are the same for
# `detector` and `detector_no_grad`, so, when we adapt `detector` the values of
# `detector_no_grad` will be updated as well. But this only happens following the
# correct gradients.
# Don't you love pointers? Aliasing can be our friend sometimes.
detector.trainable = False
sampler.add(detector)

opt_g = Adam(lr=.001)  # I got better results when the
                       # detector's learning rate is higher
sampler.compile(loss='binary_crossentropy', optimizer=opt_g)

# debug
opt_d = Adam(lr=.002)
detector.trainable = True
detector.compile(loss='binary_crossentropy', optimizer=opt_d)
detector.predict(np.ones((3, mnist_dim))).shape



nb_epoch = 1000 # it takes some time to get something recognizable.
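
# Note: the same freeze-then-stack idea without the legacy Theano plumbing;
# a minimal sketch in later Keras (shapes and names illustrative):
from keras.models import Model, Sequential
from keras.layers import Dense, Input

detector = Sequential([Dense(1, input_dim=784, activation='sigmoid')])
detector.compile(optimizer='adam', loss='binary_crossentropy')  # own updates

sampler = Sequential([Dense(128, input_dim=100, activation='relu'),
                      Dense(784, activation='sigmoid')])

# The weights are shared Python objects, so the detector trained through
# its own compiled function is the very detector frozen inside the stack.
detector.trainable = False
z = Input(shape=(100,))
stacked = Model(z, detector(sampler(z)))
stacked.compile(optimizer='adam', loss='binary_crossentropy')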
Example No. 24
def ReturnModel():
    inp1 = Input((128, 128, 1))

    x = Conv2D(
        16,
        (3, 3),
        padding='same',
    )(inp1)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #---------------------------------------------------------
    x = Conv2D(32, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #---------------------------------------------------------
    x = Conv2D(64, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #---------------------------------------------------------
    x = Conv2D(64, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #---------------------------------------------------------
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = Activation('tanh')(x)
    x = MaxPooling2D(pool_size=(2, 2), padding='same')(x)
    #---------------------------------------------------------
    encoder = Model(inputs=inp1, outputs=x)
    rms = keras.optimizers.Adam(lr=0.001)
    encoder.compile(optimizer=rms, loss='mse')

    encoder.load_weights("models/tanh_en_2_24_en.h5")
    encoder.trainable = False
    rms = keras.optimizers.Adam(lr=0.001)
    encoder.compile(optimizer=rms, loss='mse')
    encoder.summary()

    #Time distributed ENCODER
    modelpre = Sequential()
    modelpre.add(TimeDistributed(encoder, input_shape=(3, 128, 128, 1)))
    modelpre.add(TimeDistributed(Flatten(), input_shape=(3, 1024)))
    modelpre.trainable = False

    rms = keras.optimizers.Adam(lr=0.001)
    modelpre.compile(optimizer=rms, loss='mse')

    inp = Input((4, 4, 64))
    x = UpSampling2D((2, 2))(inp)
    #---------------------------------------------------------
    x = Conv2D(64, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = UpSampling2D((2, 2))(x)
    #---------------------------------------------------------
    x = Conv2D(32, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = UpSampling2D((2, 2))(x)
    #---------------------------------------------------------
    x = Conv2D(16, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = UpSampling2D((2, 2))(x)
    #---------------------------------------------------------
    x = Conv2D(16, (3, 3), padding='same')(x)
    #x = Activation('relu')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = UpSampling2D((2, 2))(x)
    #---------------------------------------------------------
    x = Conv2D(1, (3, 3), padding='same')(x)
    output = Activation('sigmoid')(x)
    #---------------------------------------------------------
    decoder = Model(inputs=inp, outputs=output)
    rms = keras.optimizers.Adam(lr=0.001)
    decoder.compile(optimizer=rms, loss='mse')

    decoder.load_weights("models/tanh_de_2_24_en.h5")
    decoder.trainable = False
    rms = keras.optimizers.Adam(lr=0.001)
    decoder.compile(optimizer=rms, loss='mse')
    decoder.summary()
    #####################################################################################################################
    #---------------------------------MIDDLE LAYER----------------------------------------------------------------------------
    #####################################################################################################################

    encoder_inputs = Input(shape=(3, 1024))
    encoder = LSTM(1024, input_shape=(3, 1024), return_state=True)  # note: rebinds `encoder` from the CNN model above
    encoder_outputs, state_h, state_c = encoder(encoder_inputs)
    encoder_states = [state_h, state_c]

    decoder_inputs = Input((1, 1024))
    decoder_lstm = LSTM(1024, return_sequences=True, return_state=False)
    decoder_outputs = decoder_lstm(decoder_inputs,
                                   initial_state=encoder_states)

    modelmid = Model([encoder_inputs, decoder_inputs], decoder_outputs)

    rms = keras.optimizers.Adam(lr=0.001)
    modelmid.compile(optimizer=rms, loss='mse')

    modelmid.load_weights("models/tanh_seq2seq.h5")

    # TODO: add a ReLU 1024->1024 Dense layer on the output

    rms = keras.optimizers.Adam(lr=0.001)
    modelmid.compile(optimizer=rms, loss='mse', metrics=['accuracy'])
    modelmid.summary()

    #SUPERMODEL
    inp1 = Input((3, 128, 128, 1))
    inp2 = Input((1, 1024))

    x = modelpre(inp1)
    x = modelmid([x, inp2])
    x = Reshape((4, 4, 64))(x)
    output = decoder(x)

    generator = Model(inputs=[inp1, inp2], outputs=output)
    rms = keras.optimizers.Adam(lr=0.001)
    generator.compile(optimizer=rms, loss='mse')
    return generator
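
# Note: hypothetical usage of ReturnModel - predict the next frame from
# three 128x128 grayscale frames plus a (1, 1024) decoder seed:
import numpy as np

gen = ReturnModel()
frames = np.zeros((1, 3, 128, 128, 1), dtype='float32')
seed = np.zeros((1, 1, 1024), dtype='float32')
next_frame = gen.predict([frames, seed])  # shape (1, 128, 128, 1)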
Example No. 25
plt.title('Training and Validation accuracy')
plt.legend()
plt.show()

#Train and validation loss
plt.plot(epochs, loss, 'blue', label='Training loss')
plt.plot(epochs, val_loss, 'red', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()


#Printing the Model Training Accuracy
print("[INFO] Calculating model accuracy")
scores = model.evaluate(X_val, y_val)
print("Test Accuracy: {scores[1]*100}")




# Freeze layers since training is done
model.trainable = False
model.compile(optimizer='Adam', loss='mean_squared_error')

# Save model architecture and weights
model.save('test_full_CNN_model.h5')

# Show summary of model
model.summary()

Example No. 26
#Classifier
model = Sequential()
model.add(
    Conv2D(64, (7, 7),
           activation='relu',
           input_shape=input_shape,
           padding='valid'))
model.add(Conv2D(128, (7, 7), activation='relu', padding='valid'))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(256, (7, 7), activation='relu', padding='valid'))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.trainable = True

g_gen = Sequential()
g_gen.add(Dense(1600, activation='relu', input_shape=(64, )))
g_gen.add(Reshape((5, 5, 64)))
g_gen.add(Conv2DTranspose(512, (5, 5), activation='relu', padding='valid'))
g_gen.add(Conv2DTranspose(256, (5, 5), activation='relu', padding='valid'))
g_gen.add(Conv2DTranspose(256, (7, 7), activation='relu', padding='valid'))
g_gen.add(Conv2DTranspose(1, (10, 10), activation='linear', padding='valid'))
g_gen.trainable = True

g_disc = Sequential()
g_disc.add(
    Conv2D(256, (3, 3),
           activation='relu',
           input_shape=input_shape,
Example No. 27
def test_nested_model_trainability():
    # a Sequential inside a Model
    inner_model = Sequential()
    inner_model.add(Dense(2, input_dim=1))

    x = Input(shape=(1,))
    y = inner_model(x)
    outer_model = Model(x, y)
    assert outer_model.trainable_weights == inner_model.trainable_weights
    inner_model.trainable = False
    assert outer_model.trainable_weights == []
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    assert outer_model.trainable_weights == []

    # a Sequential inside a Sequential
    inner_model = Sequential()
    inner_model.add(Dense(2, input_dim=1))
    outer_model = Sequential()
    outer_model.add(inner_model)
    assert outer_model.trainable_weights == inner_model.trainable_weights
    inner_model.trainable = False
    assert outer_model.trainable_weights == []
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    assert outer_model.trainable_weights == []

    # a Model inside a Model
    x = Input(shape=(1,))
    y = Dense(2)(x)
    inner_model = Model(x, y)
    x = Input(shape=(1,))
    y = inner_model(x)
    outer_model = Model(x, y)
    assert outer_model.trainable_weights == inner_model.trainable_weights
    inner_model.trainable = False
    assert outer_model.trainable_weights == []
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    assert outer_model.trainable_weights == []

    # a Model inside a Sequential
    x = Input(shape=(1,))
    y = Dense(2)(x)
    inner_model = Model(x, y)
    outer_model = Sequential()
    outer_model.add(inner_model)
    assert outer_model.trainable_weights == inner_model.trainable_weights
    inner_model.trainable = False
    assert outer_model.trainable_weights == []
    inner_model.trainable = True
    inner_model.layers[-1].trainable = False
    assert outer_model.trainable_weights == []