def adversarial_compile(self, loss='binary_crossentropy', schedule=None):
    """Assemble and compile the two-player adversarial (GAN) model.

    Builds the discriminator and generator sub-models via the configured
    builders, wraps them in an ``AdversarialModel`` and compiles it.

    Args:
        loss: loss identifier passed through to ``adversarial_compile``.
        schedule: optional player-update schedule; when given, an
            ``AdversarialOptimizerScheduled`` is used instead of the
            default simultaneous optimizer.
    """
    # Builders return (model, optimizer) pairs; compilation is deferred
    # until the adversarial wrapper is assembled below.
    dm, dmop = self.dmBuilder(self.get_discriminator()[0], do_compile=False)
    am, amop = self.amBuilder(self.get_generator(), self.get_discriminator(), do_compile=False)
    self.am = am
    self.dm = dm
    # One combined graph per player: each player model sees its own
    # adversarial outputs plus the discriminator outputs.
    self.player_models = (
        Model(inputs=am[0].inputs + dm.inputs, outputs=am[0].outputs + dm.outputs),
        Model(inputs=am[1].inputs + dm.inputs, outputs=am[1].outputs + dm.outputs))
    self.model = AdversarialModel(
        player_models=self.player_models,
        player_params=[self.get_discriminator()[0].trainable_weights,
                       self.get_generator().trainable_weights],
        player_names=["discriminator", "generator"])
    # FIX: `if not schedule is None` -> PEP 8 `is not`; also removed a
    # leftover debug print of the loss.
    if schedule is not None:
        optimizer = AdversarialOptimizerScheduled(schedule)
    else:
        optimizer = AdversarialOptimizerSimultaneous()
    self.model.adversarial_compile(adversarial_optimizer=optimizer,
                                   player_optimizers=[amop, dmop],
                                   loss=loss)
def gan_model_test():
    """Smoke test: assemble a tiny GAN, compile it, and train briefly."""
    latent_dim, input_dim = 10, 5
    g = model_generator(input_dim=input_dim, latent_dim=latent_dim)
    d = model_discriminator(input_dim=input_dim)
    gan = simple_gan(g, d, normal_latent_sampling((latent_dim,)))
    # Wrap generator and discriminator as the two adversarial players.
    model = AdversarialModel(base_model=gan,
                             player_params=[g.trainable_weights, d.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4), Adam(1e-3)],
                              loss='binary_crossentropy')
    # A few mini-batches of random data are enough to exercise the graph.
    batch_size = 32
    n = batch_size * 8
    x = np.random.random((n, input_dim))
    y = gan_targets(n)
    fit(model, x, y, nb_epoch=3, batch_size=batch_size)
def main():
    """CIFAR-10 GAN entry point: build the models, then launch training."""
    gen = model_generator()
    disc = model_discriminator()
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
                opt_g=Adam(1e-4, decay=1e-5),
                opt_d=Adam(1e-3, decay=1e-5),
                nb_epoch=100,
                generator=gen,
                discriminator=disc,
                latent_dim=100)
def main():
    """Run the CIFAR-10 GAN example with a 100-dimensional latent prior."""
    z_dim = 100
    # generator: z -> x
    gen = model_generator()
    # discriminator: x -> y (real/fake score)
    disc = model_discriminator()
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan-cifar10",
                opt_g=Adam(1e-4, decay=1e-5),
                opt_d=Adam(1e-3, decay=1e-5),
                nb_epoch=100,
                generator=gen,
                discriminator=disc,
                latent_dim=z_dim)
def GANTrain(params):
    """Train a GAN on the background class of the ATLAS Higgs dataset.

    Reads train/test HDF files selected by ``params['experiment']`` and
    ``params['jet_num']``, standardizes the background rows, and hands
    everything to ``example_gan`` for 500 epochs.
    """
    X_train = pd.read_hdf(
        "data/atlas-higgs_experiment{0}_train_{1}.hdf".format(params['experiment'], params['jet_num']), "X") \
        .values.astype(np.float32)
    y_train = pd.read_hdf(
        "data/atlas-higgs_experiment{0}_train_{1}.hdf".format(
            params['experiment'], params['jet_num']), "y")
    X_test = pd.read_hdf(
        "data/atlas-higgs_experiment{0}_test_{1}.hdf".format(params['experiment'], params['jet_num']), "X") \
        .values.astype(np.float32)
    # y_test = pd.read_hdf(
    #     "data/atlas-higgs_experiment{0}_test_{1}.hdf".format(params['experiment'], params['jet_num']), "y")
    # Keep only background events (label 0) for GAN training.
    X_b = X_train[y_train == 0]
    ss = StandardScaler()
    ss.fit(X_b)
    X_b = ss.transform(X_b)
    X_test = ss.transform(X_test)
    rows, cols = X_b.shape
    # NOTE(review): this MinMaxScaler is fit twice (second fit on X_test
    # overwrites the first) and its transform is never applied — looks like
    # dead or unfinished code; confirm intent before relying on it.
    ss = MinMaxScaler()
    ss.fit(X_b)
    ss.fit(X_test)
    latent_dim = 2
    # generator (z -> x)
    generator = generator_model(latent_dim, cols, layers=[25, 50], activation=LeakyReLU)
    # discriminator (x -> y)
    discriminator = discriminator_model(cols, layers=[30, 10, 5], activation=LeakyReLU, dropout=0.5)
    # NOTE(review): output path contains typos ("altas", "esperiment");
    # left untouched because it is a runtime string other tooling may expect.
    example_gan(AdversarialOptimizerSimultaneous(),
                "scripts/results/altas-higgs_gan_esperiment{0}_jn{1}".format(
                    params['experiment'], params['jet_num']),
                X_b,
                opt_g=Adam(1e-4),
                opt_d=Adam(1e-4),
                nb_epoch=500,
                generator=generator,
                discriminator=discriminator,
                latent_dim=latent_dim,
                params=params)
def main():
    """Build MNIST-sized GAN models and hand them to ``example_gan``."""
    z_dim, img_shape = 100, (28, 28)
    # generator: z -> x
    gen = model_generator(z_dim, img_shape)
    # discriminator: x -> y
    disc = model_discriminator(img_shape)
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan",
                opt_g=Adam(1e-4, decay=1e-4),
                opt_d=Adam(1e-3, decay=1e-4),
                nb_epoch=100,
                generator=gen,
                discriminator=disc,
                latent_dim=z_dim)
def main(): input_shape = (160, 2) # generator (z -> x) generator = model_generator(latent_dim, input_shape) # discriminator (x -> y) discriminator = model_discriminator(input_shape) G = train_gan(AdversarialOptimizerSimultaneous(), "output/gan2", opt_g=Adam(1e-4, decay=1e-4), opt_d=Adam(1e-3, decay=1e-4), nb_epoch=200, generator=generator, discriminator=discriminator, latent_dim=latent_dim) return G
def main():
    """Train a convolutional GAN on MNIST; save sample grids, history, models."""
    latent_dim = 100
    # Theano-ordered image shape; dim_ordering_fix below adapts to the backend.
    input_shape = (1, 28, 28)
    generator = model_generator()
    discriminator = model_discriminator(input_shape=input_shape)
    # GAN whose latent z is sampled from N(0, 1) inside the graph.
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim, )))
    generator.summary()
    discriminator.summary()
    gan.summary()
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')
    # Write a grid of generated samples at the end of every epoch.
    generator_cb = ImageGridCallback(
        "output/gan_convolutional/epoch-{:03d}.png",
        generator_sampler(latent_dim, generator))
    xtrain, xtest = mnist_data()
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb], nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")
    generator.save("output/gan_convolutional/generator.h5")
    discriminator.save("output/gan_convolutional/discriminator.h5")
def main():
    """Train a GAN on images listed in MData/Train/train.csv and save samples."""
    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'MData')
    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)
    train_x = np.stack(temp)
    # Scale pixel intensities to [0, 1].
    train_x = train_x / 255
    epochs = 1
    batch_size = 128
    model_1 = model_generator_cifar()
    model_2 = model_discriminator_cifar()
    # gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
    latent_dim = 100
    gan = simple_gan(model_1, model_2, latent_sampling=normal_latent_sampling((latent_dim,)))
    model = AdversarialModel(base_model=gan,
                             player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=['adam', 'adam'],
                              loss='binary_crossentropy')
    history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]),
                        epochs=epochs, batch_size=batch_size)
    # Sample 10 latent vectors and save the generated images.
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/animals/' + str(i) + '.png')
def main():
    """MNIST-style GAN demo: load images listed in Data/Train/train.csv,
    train a dense generator/discriminator pair, and save 10 generated images."""
    # to stop potential randomness
    seed = 128
    rng = np.random.RandomState(seed)
    # set path
    root_dir = os.path.abspath('.')
    data_dir = os.path.join(root_dir, 'Data')
    # load data
    train = pd.read_csv(os.path.join(data_dir, 'Train', 'train.csv'))
    # test = pd.read_csv(os.path.join(data_dir, 'test.csv'))
    temp = []
    for img_name in train.filename:
        image_path = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
        img = imread(image_path, flatten=True)
        img = img.astype('float32')
        temp.append(img)
    train_x = np.stack(temp)
    train_x = train_x / 255
    # print image (loaded but only displayed via the commented pylab calls)
    img_name = rng.choice(train.filename)
    filepath = os.path.join(data_dir, 'Train', 'Images', 'train', img_name)
    img = imread(filepath, flatten=True)
    # pylab.imshow(img, cmap='gray')
    # pylab.axis('off')
    # pylab.show()
    # Hyper-parameters ("levers").
    g_input_shape = 100
    d_input_shape = (28, 28)
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784
    d_output_num_units = 1
    epochs = 25
    batch_size = 128
    # generator: z (100) -> 784 sigmoid units -> reshaped to (28, 28)
    model_1 = Sequential([
        Dense(units=hidden_1_num_units, input_dim=g_input_shape,
              activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units, activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=g_output_num_units, activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Reshape(d_input_shape),
    ])
    # discriminator: (28, 28) image -> single sigmoid real/fake score
    model_2 = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units, activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units, activation='relu',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=d_output_num_units, activation='sigmoid',
              kernel_regularizer=L1L2(1e-5, 1e-5)),
    ])
    gan = simple_gan(model_1, model_2, normal_latent_sampling((100, )))
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_1.trainable_weights, model_2.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
    # NOTE(review): `epochs` is set to 25 above, but fit() hard-codes 10 —
    # probably a leftover; confirm which value is intended.
    history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]),
                        epochs=10, batch_size=batch_size)
    # Save 10 generated digits.
    zsamples = np.random.normal(size=(10, 100))
    pred = model_1.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.savefig('out/numbers/' + str(i) + '.png')
def main():
    """Train a BiGAN on MNIST: generator + encoder vs. discriminator.

    Saves per-epoch image grids (generated and autoencoded), the training
    history CSV, and the three sub-models.
    """
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x'): decode the encoder's latent with the generator
    autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (x -> y)
    discriminator = model_discriminator(latent_dim, input_shape)
    # bigan (x -> yfake, yreal); z is generated on the GPU inside the graph
    bigan = simple_bigan(generator, encoder, discriminator,
                         normal_latent_sampling((latent_dim, )))
    # Generator and encoder train jointly as the "generator" player.
    generative_params = generator.trainable_weights + encoder.trainable_weights
    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    bigan.summary()
    autoencoder.summary()
    # build adversarial model
    model = AdversarialModel(
        base_model=bigan,
        player_params=[generative_params, discriminator.trainable_weights],
        player_names=["generator", "discriminator"])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
        loss='binary_crossentropy')
    # train model
    xtrain, xtest = mnist_data()

    def generator_sampler():
        # 10x10 grid of images sampled from the latent prior.
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        return generator.predict(zsamples).reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/bigan/generated-epoch-{:03d}.png",
                                     generator_sampler)

    def autoencoder_sampler():
        # Each row: one real test image followed by 9 reconstructions.
        xsamples = n_choice(xtest, 10)
        xrep = np.repeat(xsamples, 9, axis=0)
        xgen = autoencoder.predict(xrep).reshape((10, 9, 28, 28))
        xsamples = xsamples.reshape((10, 1, 28, 28))
        x = np.concatenate((xsamples, xgen), axis=1)
        return x

    autoencoder_cb = ImageGridCallback(
        "output/bigan/autoencoded-epoch-{:03d}.png", autoencoder_sampler)
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb, autoencoder_cb],
                        nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/bigan/history.csv")
    encoder.save("output/bigan/encoder.h5")
    generator.save("output/bigan/generator.h5")
    discriminator.save("output/bigan/discriminator.h5")
# discriminator: (28, 28) image -> single sigmoid real/fake score
# (model_1, d_input_shape, hidden_*_num_units, train_x and batch_size come
# from earlier in this script.)
model_2 = Sequential([
    InputLayer(input_shape=d_input_shape),
    Flatten(),
    Dense(units=hidden_1_num_units, activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=hidden_2_num_units, activation='relu',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
    Dense(units=d_output_num_units, activation='sigmoid',
          kernel_regularizer=L1L2(1e-5, 1e-5)),
])
# FIX: Python-2 `print x` statements replaced with print() calls so the
# script also parses under Python 3; behavior under Python 2 is unchanged
# for a single argument.
print(model_1.summary())
print(model_2.summary())
from keras_adversarial import AdversarialModel, simple_gan, gan_targets
from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
gan = simple_gan(model_1, model_2, normal_latent_sampling((100,)))
model = AdversarialModel(base_model=gan,
                         player_params=[model_1.trainable_weights, model_2.trainable_weights])
model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                          player_optimizers=['adam', 'adam'],
                          loss='binary_crossentropy')
print(gan.summary())
history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]),
                    epochs=10, batch_size=batch_size)
gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,))) #load ancien weights : generator.load_weights('./output_500/gan_convolutional/generator.h5') discriminator.load_weights('./output_500/gan_convolutional/discriminator.h5') # print summary of models generator.summary() discriminator.summary() gan.summary() # build adversarial model model = AdversarialModel(base_model=gan, player_params=[generator.trainable_weights, discriminator.trainable_weights], player_names=["generator", "discriminator"]) model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)], loss='binary_crossentropy', player_compile_kwargs=[{'metrics':['accuracy']},{'metrics':['accuracy']}]) # train model generator_cb = ImageGridCallback("output_600/gan_convolutional/epoch-{:03d}.png", generator_sampler(latent_dim, generator)) fname = "base_hiver_2008.pklgz" with gzip.open(fname, "rb") as fp: dictio = pickle.load(fp) data = dictio['SSTMW'] x = data[:, :92, :92].astype(np.float32) xtrain = x[:-10]
def main():
    """Entry point: run the BiGAN example with simultaneous updates."""
    optimizer = AdversarialOptimizerSimultaneous()
    example_bigan("output/bigan", optimizer)
def example_aae(path, adversarial_optimizer):
    """Build and train an adversarial autoencoder on 64x64 inputs.

    Args:
        path: output path prefix. NOTE(review): currently unused in this
            function body — confirm whether saving was intended.
        adversarial_optimizer: keras_adversarial optimizer coordinating the
            autoencoder and discriminator players.

    Returns:
        (History, Outputs): the Keras training history and the predicted
        reconstructions for ``xtest``, reshaped to (n, 64, 64).

    NOTE(review): ``xtrain``, ``y``, ``xval``, ``yval`` and ``xtest`` are
    read from the enclosing module scope — confirm they are defined before
    this is called.
    """
    # z \in R^100
    latent_dim = 100
    # x \in R^{64x64}
    input_shape = (64, 64)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # encoder (x -> z)
    encoder = model_encoder(latent_dim, input_shape)
    # autoencoder (x -> x')
    autoencoder = model_encoder(latent_dim, input_shape)
    # autoencoder = Model(encoder.inputs, generator(encoder(encoder.inputs)))
    # discriminator (z -> y)
    discriminator = model_discriminator(latent_dim)
    # Assemble the AAE: the discriminator scores input and reconstruction
    # stacked along the channel axis.
    x = autoencoder.inputs[0]
    xpred = autoencoder(x)
    yreal = discriminator(concatenate([x, xpred], axis=3))
    aae = Model(x, fix_names([xpred, yreal], ["xpred", "yreal"]))
    # print summary of models
    generator.summary()
    encoder.summary()
    discriminator.summary()
    autoencoder.summary()
    # BUG FIX: the passed-in `adversarial_optimizer` was previously
    # overwritten here with a fresh AdversarialOptimizerSimultaneous(),
    # silently ignoring the caller's choice; the argument is now honored.
    model = AdversarialModel(base_model=aae,
                             player_params=[autoencoder.trainable_weights,
                                            discriminator.trainable_weights],
                             player_names=["autoencoder", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=['adam', Adam(1e-3, decay=1e-3)],
                              loss={"yreal": "binary_crossentropy", "xpred": masked_mse},
                              player_compile_kwargs=[{"loss_weights": {"yreal": 1e-2, "xpred": 1}}] * 2)
    History = model.fit(x=xtrain, y=y, validation_data=(xval, yval),
                        epochs=100, batch_size=15)
    Outputs = model.predict(xtest)
    print(Outputs[0].shape)
    Outputs = Outputs[0].reshape(Outputs[0].shape[0], 64, 64)
    return (History, Outputs)
def main():
    """Train a GAN on 32x32 grayscale dog images and save 10 generated samples."""
    data_dir = "goldens_filtered_32x32_gray/"
    out_dir = "m_gan_out/"
    epochs = 1
    batch_size = 64
    # TODO: Research why these values were chosen
    opt_g = Adam(1e-4, decay=1e-5)
    opt_d = Adam(1e-3, decay=1e-5)
    loss = 'binary_crossentropy'
    latent_dim = 100
    adversarial_optimizer = AdversarialOptimizerSimultaneous()
    # My simple models
    # generator = get_generator()
    # discriminator = get_discriminator()
    # CIFAR example convolutional models
    generator = get_generator_cifar()
    discriminator = get_discriminator_cifar()
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim, )))
    # print summary of models
    generator.summary()
    discriminator.summary()
    gan.summary()
    # build adversarial model
    model = AdversarialModel(base_model=gan,
                             player_params=[
                                 generator.trainable_weights,
                                 discriminator.trainable_weights
                             ],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=adversarial_optimizer,
                              player_optimizers=[opt_g, opt_d],
                              loss=loss)
    # Load every image in data_dir and scale to [0, 1].
    temp = []
    for img_name in os.listdir(data_dir):
        image_path = data_dir + img_name
        img = imread(image_path)
        img = img.astype('float32')
        temp.append(img)
    train_x = np.stack(temp)
    train_x = train_x / 255
    # Side effects
    model.fit(x=train_x, y=gan_targets(train_x.shape[0]),
              epochs=epochs, batch_size=batch_size)
    # Sample the generator and save each image.
    zsamples = np.random.normal(size=(10, latent_dim))
    pred = generator.predict(zsamples)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :])
        plt.savefig(out_dir + str(i) + '.png')
# NOTE(review): these first statements are the tail of an enclosing training
# function (they use its locals: model, xtrain, y, xtest, ytest,
# generator_cb, nb_epoch, csvpath, path) — confirm indentation/scope against
# the original file.
history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                    callbacks=[generator_cb], nb_epoch=nb_epoch, batch_size=32)
df = pd.DataFrame(history.history)
df.to_csv(csvpath)
generator.save(os.path.join(path, "generator.h5"))
discriminator.save(os.path.join(path, "discriminator.h5"))


if __name__ == "__main__":
    # z \in R^100
    latent_dim = 100
    # x \in R^{28x28}
    input_shape = (28, 28)
    # generator (z -> x)
    generator = model_generator(latent_dim, input_shape)
    # discriminator (x -> y)
    discriminator = model_discriminator(input_shape)
    # clipvalue bounds each gradient element; decay anneals the learning rate.
    example_gan(AdversarialOptimizerSimultaneous(), "output/gan",
                opt_g=Adam(1e-4, decay=1e-4, clipvalue=2.0),
                opt_d=Adam(1e-3, decay=1e-4, clipvalue=2.0),
                nb_epoch=100,
                generator=generator,
                discriminator=discriminator,
                latent_dim=latent_dim)
def main():
    """Run the AAE example and return its (history, outputs) pair."""
    history, outputs = example_aae("output/aae", AdversarialOptimizerSimultaneous())
    return history, outputs
def __init__(self, uNum, iNum, dim, weight, pop_percent):
    """Adversarial matrix-factorisation model with user/item discriminators.

    Args:
        uNum: number of users (user-embedding vocabulary size).
        iNum: number of items (item-embedding vocabulary size).
        dim: embedding dimensionality.
        weight: adversarial loss weight (stored; see commented-out
            compile variant for its original use).
        pop_percent: stored for use elsewhere in the class.
    """
    self.uNum = uNum
    self.iNum = iNum
    self.dim = dim
    self.weight = weight
    self.pop_percent = pop_percent
    # Index inputs: each a single integer id.
    userInput = Input(shape=(1,), dtype="int32")
    itemInput = Input(shape=(1,), dtype="int32")
    userAdvInput = Input(shape=(1,), dtype="int32")
    itemAdvInput = Input(shape=(1,), dtype="int32")
    # Shared embedding tables for both the MF path and the adversarial path.
    userEmbeddingLayer = Embedding(input_dim=uNum, output_dim=dim)
    itemEmbeddingLayer = Embedding(input_dim=iNum, output_dim=dim)
    uEmb = Flatten()(userEmbeddingLayer(userInput))
    iEmb = Flatten()(itemEmbeddingLayer(itemInput))
    uAdvEmb = Flatten()(userEmbeddingLayer(userAdvInput))
    iAdvEmb = Flatten()(itemEmbeddingLayer(itemAdvInput))
    self.uEncoder = Model(userInput, uEmb)
    self.iEncoder = Model(itemInput, iEmb)
    # Item discriminator: compiled standalone, then frozen inside the
    # combined adversarial graph.
    self.discriminator_i = self.generate_discriminator()
    self.discriminator_i.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
    self.discriminator_i.trainable = False
    validity = self.discriminator_i(iAdvEmb)
    # User discriminator: same treatment.
    self.discriminator_u = self.generate_discriminator()
    self.discriminator_u.compile(optimizer="adam", loss="binary_crossentropy", metrics=['accuracy'])
    self.discriminator_u.trainable = False
    validity_u = self.discriminator_u(uAdvEmb)
    # Rating prediction = dot product of user and item embeddings.
    pred = dot([uEmb, iEmb], axes=-1)
    self.model = Model([userInput, itemInput], pred)
    self.aae = Model([userInput, itemInput, userAdvInput, itemAdvInput],
                     fix_names([pred, validity_u, validity],
                               ["xpred", "upred", "ipred"]))
    mf_params = self.uEncoder.trainable_weights + self.iEncoder.trainable_weights
    self.advModel = AdversarialModel(base_model=self.aae,
                                     player_params=[mf_params,
                                                    self.discriminator_u.trainable_weights,
                                                    self.discriminator_i.trainable_weights],
                                     player_names=["mf", "disc_u", "disc_i"])
    # BUG FIX: there are THREE players but player_compile_kwargs was
    # replicated only twice ([...] * 2), leaving the third player without
    # compile kwargs; replicate once per player (* 3).
    self.advModel.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=[Adam(), Adam(), Adam()],
        loss={"upred": "binary_crossentropy",
              "ipred": "binary_crossentropy",
              "xpred": "mean_squared_error"},
        player_compile_kwargs=[{"loss_weights": {"upred": 1, "ipred": 1, "xpred": 1}}] * 3)
def main():
    """Entry point: run the AAE example (return value discarded)."""
    optimizer = AdversarialOptimizerSimultaneous()
    example_aae("output/aae", optimizer)
def main(is_large_model, file_source, epochs, per_epoch, verbose, output_directory, loss, gen_lr, disc_lr, gen_reg, disc_reg):
    """Train either the large (64x64x7) or small (24x24x12) EM-volume GAN.

    The flag selects model builders, latent size and run id; everything
    else is forwarded to ``train_em_gan``.
    """
    if is_large_model:
        print("Initiating large model...")
        util.print_model_parameters(file_source, epochs, per_epoch, verbose,
                                    output_directory, loss, gen_lr, disc_lr,
                                    gen_reg, disc_reg)
        sys.stdout.flush()
        latent_dim = 600
        input_shape = (64, 64, 7)
        generator = em_generator_large(latent_dim, input_shape,
                                       reg=lambda: l1l2(gen_reg, gen_reg))
        discriminator = em_discriminator_large(
            input_shape, reg=lambda: l1l2(disc_reg, disc_reg))
        train_em_gan(AdversarialOptimizerSimultaneous(), generator,
                     discriminator, Adam(gen_lr), Adam(disc_lr), latent_dim,
                     file_source, "/volumes/raw", input_shape,
                     output_directory, verbose=verbose, epochs=epochs,
                     per_epoch=per_epoch, loss=loss,
                     r_id=("large_" + str(gen_lr) + "_" + str(disc_lr) + "_" + str(gen_reg) + "_" + str(disc_reg)),
                     is_large_model=True)
    else:
        print("Initiating small model...")
        # NOTE(review): the large branch calls util.print_model_parameters
        # while this branch calls a bare print_model_parameters — confirm
        # both names resolve in the original module.
        print_model_parameters(file_source, epochs, per_epoch, verbose,
                               output_directory, loss, gen_lr, disc_lr,
                               gen_reg, disc_reg)
        sys.stdout.flush()
        latent_dim = 300
        input_shape = (24, 24, 12)
        generator = em_generator(latent_dim, input_shape,
                                 reg=lambda: l1l2(gen_reg, gen_reg))
        discriminator = em_discriminator(input_shape,
                                         reg=lambda: l1l2(disc_reg, disc_reg))
        # NOTE(review): unlike the large branch, r_id here omits the reg
        # values and is_large_model is left at its default — confirm intended.
        train_em_gan(AdversarialOptimizerSimultaneous(), generator,
                     discriminator, Adam(gen_lr), Adam(disc_lr), latent_dim,
                     file_source, "/volumes/raw", input_shape,
                     output_directory, verbose=verbose, epochs=epochs,
                     per_epoch=per_epoch, loss=loss,
                     r_id=(str(gen_lr) + "_" + str(disc_lr)))
def main():
    """Entry point: run the cars AAE example."""
    optimizer = AdversarialOptimizerSimultaneous()
    cars_aae("output/cars_aae", optimizer)
def mnist_data():
    """Return preprocessed MNIST (train, test) images; labels are discarded."""
    (xtrain, ytrain), (xtest, ytest) = mnist.load_data()
    return mnist_process(xtrain), mnist_process(xtest)


if __name__ == "__main__":
    latent_dim = 100
    input_shape = (1, 28, 28)
    generator = model_generator()
    discriminator = model_discriminator(input_shape=input_shape)
    # GAN with z ~ N(0, 1) sampled inside the graph.
    gan = simple_gan(generator, discriminator, normal_latent_sampling((latent_dim,)))
    generator.summary()
    discriminator.summary()
    gan.summary()
    model = AdversarialModel(base_model=gan,
                             player_params=[generator.trainable_weights, discriminator.trainable_weights],
                             player_names=["generator", "discriminator"])
    model.adversarial_compile(adversarial_optimizer=AdversarialOptimizerSimultaneous(),
                              player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)],
                              loss='binary_crossentropy')

    def generator_sampler():
        # 10x10 grid of samples drawn from the latent prior.
        zsamples = np.random.normal(size=(10 * 10, latent_dim))
        gen = dim_ordering_unfix(generator.predict(zsamples))
        return gen.reshape((10, 10, 28, 28))

    generator_cb = ImageGridCallback("output/gan_convolutional/epoch-{:03d}.png", generator_sampler)
    xtrain, xtest = mnist_data()
    # Adapt Theano-ordered arrays to the active backend's dim ordering.
    xtrain = dim_ordering_fix(xtrain.reshape((-1, 1, 28, 28)))
    xtest = dim_ordering_fix(xtest.reshape((-1, 1, 28, 28)))
    y = gan_targets(xtrain.shape[0])
    ytest = gan_targets(xtest.shape[0])
    history = model.fit(x=xtrain, y=y, validation_data=(xtest, ytest),
                        callbacks=[generator_cb], nb_epoch=100, batch_size=32)
    df = pd.DataFrame(history.history)
    df.to_csv("output/gan_convolutional/history.csv")
denoising_autoencoder_fit(x_train, x_train_noisy, dimension=50, optimizer="adadelta", loss_function="binary_crossentropy", nb_epoch=100, batch_size=20, path='./feature_extractiondenoisingAE/'+aaenum+'/') ''' ######### Deep Autoencoder ################ ''' deep_autoencoder_fit(x_train, dimension=50, optimizer="adadelta", loss_function="binary_crossentropy", nb_epoch=100, batch_size=20, path='./feature_extraction/DeepAE/'+aaenum+'/') ''' ############## AAE ############## aae_model('./feature_extraction/AAE/', AdversarialOptimizerSimultaneous(), xtrain=x_train, encoded_dim=50,img_dim=x_train.shape[1], nb_epoch=100) ################ Variational Autoencoder #################### ''' vae_model_single('./feature_extraction/VAE/'+aaenum+'/',x_train.shape[1], x_train,intermediate_dim=1000,batch_size=20,latent_dim=50,epochs=100) ''' #index = dataset.iloc[0:20482,0] # this is for valiadtion data index = file_1.iloc[0:20439,0] ################ load model ######################### weight = load_model('./feature_extraction/AAE/aae_encoder.h5')
def gan():
    """Train a fully-connected GAN on 28x28 images, save weights, plot losses.

    NOTE(review): reads `train_x` from the enclosing module scope — confirm
    it is prepared before this is called.
    """
    # define variables / hyper-parameters
    g_input_shape = 100       # generator input size (latent dim)
    d_input_shape = (28, 28)  # discriminator input shape
    hidden_1_num_units = 500
    hidden_2_num_units = 500
    g_output_num_units = 784  # generator output size: 28*28
    d_output_num_units = 1    # discriminator output: single real/fake score
    epochs = 100
    batch_size = 128
    # generator: produces images from latent noise
    model_g = Sequential([
        Dense(units=hidden_1_num_units, input_dim=g_input_shape,
              activation='relu', kernel_regularizer=L1L2(1e-5, 1e-5)),
        Dense(units=hidden_2_num_units, activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=g_output_num_units, activation='sigmoid',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Reshape(d_input_shape)
    ])
    # discriminator: judges whether an image is real
    model_d = Sequential([
        InputLayer(input_shape=d_input_shape),
        Flatten(),
        Dense(units=hidden_1_num_units, activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=hidden_2_num_units, activation='relu',
              kernel_regularizer=L1L2(1E-5, 1E-5)),
        Dense(units=d_output_num_units, activation='sigmoid',
              kernel_regularizer=L1L2(1E-5, 1E-5))
    ])
    # model_g.summary()
    # model_d.summary()
    from keras_adversarial import AdversarialModel, simple_gan, gan_targets
    from keras_adversarial import AdversarialOptimizerSimultaneous, normal_latent_sampling
    # assemble and train the GAN
    gan = simple_gan(model_g, model_d, normal_latent_sampling((100, )))
    # gan.summary()
    # NOTE: this constructor reportedly fails on keras 2.2.x; works on 2.1.2.
    model = AdversarialModel(
        base_model=gan,
        player_params=[model_g.trainable_weights, model_d.trainable_weights])
    model.adversarial_compile(
        adversarial_optimizer=AdversarialOptimizerSimultaneous(),
        player_optimizers=['adam', 'adam'], loss='binary_crossentropy')
    # train on the prepared data
    # (a local clone of keras_adversarial replaced the pip install to work
    # around: AttributeError: 'AdversarialModel' object has no attribute
    # '_feed_output_shapes')
    history = model.fit(x=train_x, y=gan_targets(train_x.shape[0]),
                        epochs=epochs, batch_size=batch_size)
    # save weights as h5 files
    model_g.save_weights('gan1_g.h5')
    model_d.save_weights('gan1_d.h5')
    model.save_weights('gan1.h5')
    # plot the training losses
    plt.plot(history.history['player_0_loss'], label='player_0_loss')
    plt.plot(history.history['player_1_loss'], label='player_1_loss')
    plt.plot(history.history['loss'], label='loss')
    plt.show()
    # images generated after training: draw 10 latent samples -> 10 images
    zsample = np.random.normal(size=(10, 100))
    pred = model_g.predict(zsample)
    print(pred.shape)  # (10, 28, 28)
    for i in range(pred.shape[0]):
        plt.imshow(pred[i, :], cmap='gray')
        plt.show()
# NOTE(review): this first section is the tail of an enclosing training
# routine (run_gan?); it uses names from that scope (exp_dir,
# generator_sampler, generator, model, discriminator) — confirm
# indentation/scope against the original file.
save_image_cb = ImageGridCallback('./dcgan-v2-images/' + exp_dir + '/epoch-{:03d}.png', generator_sampler, cmap=None)
save_model_cb = SaveModelWeights(generator, './dcgan-v2-model-weights/' + exp_dir)
# train model: SVHN data with one GAN target set per split
xtrain, xtest = svhn_data()
y = targets(xtrain.shape[0])
ytest = targets(xtest.shape[0])
callbacks = [save_image_cb, save_model_cb]
# epoch_start lets a resumed run keep epoch numbering consistent
epoch_start = 0
epoch_count = 100
history = fit(model, x=xtrain, y=y, validation_data=(xtest, ytest),
              callbacks=callbacks, nb_epoch=epoch_start + epoch_count,
              batch_size=32, initial_epoch = epoch_start, shuffle=True)
# save history to CSV
df = pd.DataFrame(history.history)
df.to_csv('./dcgan-v2-images/' + exp_dir + '/history.csv')
# save final models
generator.save('./dcgan-v2-model-weights/' + exp_dir + '/generator.h5')
discriminator.save('./dcgan-v2-model-weights/' + exp_dir + '/discriminator.h5')


if __name__ == "__main__":
    latent_dim = 100  # input_dim
    generator = fccgan_generator(bnmode=1)
    discriminator = fccgan_discriminator_pooling(bnmode=1)
    run_gan('fccgan_pooling', AdversarialOptimizerSimultaneous(),
            opt_g=Adam(0.0001, decay=1e-5), opt_d=Adam(0.0001, decay=1e-5),
            generator=generator, discriminator=discriminator,
            latent_dim=latent_dim)
#discriminator.load_weights('./output/gan_convolutional/discriminator.h5') # print summary of models generator.summary() discriminator.summary() gan.summary() # build adversarial model model = AdversarialModel(base_model=gan, player_params=[ generator.trainable_weights, discriminator.trainable_weights ], player_names=["generator", "discriminator"]) model.adversarial_compile( adversarial_optimizer=AdversarialOptimizerSimultaneous(), player_optimizers=[Adam(1e-4, decay=1e-4), Adam(1e-3, decay=1e-4)], loss='binary_crossentropy', player_compile_kwargs=[{ 'metrics': ['accuracy'] }, { 'metrics': ['accuracy'] }]) # train model generator_cb = ImageGridCallback( "output/gan_convolutional/epoch-{:03d}.png", generator_sampler(latent_dim, generator)) fname = "base_hiver_2008.pklgz"
def main():
    """Entry point: run the driver GAN example."""
    optimizer = AdversarialOptimizerSimultaneous()
    driver_gan("output/result", optimizer)