def discriminator(height, width, channels, learning_rate, embedding_dim=64):
    """Build and compile the GAN discriminator network.

    A stack of strided Conv2D + LeakyReLU layers downsamples the input image,
    followed by Flatten/Dropout and a sigmoid Dense unit producing a
    real-vs-fake probability.

    Args:
        height: input image height in pixels.
        width: input image width in pixels.
        channels: number of input image channels.
        learning_rate: Adam learning rate used to compile the model.
        embedding_dim: unused; kept for backward compatibility with callers.
            NOTE(review): consider removing once all call sites are checked.

    Returns:
        A compiled keras Model mapping an image to a scalar probability.
    """
    input_image = Input((height, width, channels))
    disc = Conv2D(128, 4, strides=2, padding="same")(input_image)
    disc = LeakyReLU()(disc)
    disc = Conv2D(256, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)
    disc = Conv2D(512, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)
    disc = Conv2D(1024, 4, strides=2, padding="same")(disc)
    disc = LeakyReLU()(disc)
    disc = Flatten()(disc)
    # Dropout regularizes the classifier head against memorizing the generator.
    disc = Dropout(0.4)(disc)
    disc = Dense(1, activation="sigmoid")(disc)
    disc = Model(input_image, disc)
    # BUG FIX: previously used the global LEARNING_RATE, silently ignoring the
    # learning_rate parameter passed by the caller.
    discriminator_optimizer = Adam(lr=learning_rate, beta_1=0.5)
    disc.compile(discriminator_optimizer, loss="binary_crossentropy",
                 metrics=['binary_accuracy'])
    return disc
# end of the graph creation
discriminator = Model(inputs=dis_input, outputs=discriminator)

# Compilation
#############

# --- generator ---
# the optimizer
optimizer = Adam(lr=2E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# load previously saved weights (if resuming) & compile
if LOAD:
    generator.load_weights(SAVE_PATH + 'generator_model.h5')
# keep an uncompiled handle to the generator
# NOTE(review): name kept as-is ("_gnerator" typo) in case later code refers to it.
_gnerator = generator
generator.compile(loss=vgg_loss, optimizer=optimizer)

# --- discriminator (uses the same optimizer settings as the generator) ---
# load & compile
if LOAD:
    # BUG FIX: was `discriminator.loss_weights(...)`, which is not a method and
    # would raise at runtime; load_weights restores the saved parameters,
    # mirroring the generator branch above.
    discriminator.load_weights(SAVE_PATH + 'discriminator_model.h5')
discriminator.compile(loss="binary_crossentropy", optimizer=optimizer,
                      metrics=["accuracy"])

# --- GAN ---
# the optimizer for the combined model
optimizer = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# create the combined graph based on generator and discriminator