def train(epochs, batch_size):
    """Train an SRGAN: alternate discriminator updates with combined GAN updates.

    Relies on module-level data/helpers: x_train_hr, x_train_lr, image_shape,
    Generator, Discriminator, get_gan_network, vgg_loss, np, Adam.

    Args:
        epochs: number of training epochs.
        batch_size: images per training batch.
    """
    downscale_factor = 4
    # Number of random batches drawn per epoch (floor of dataset/batch ratio).
    batch_count = int(x_train_hr.shape[0] / batch_size)
    # Low-resolution input shape fed to the generator.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()
    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    # Rebuilt with a hard-coded 3 channels for the combined GAN input.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):
            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real targets in (0.8, 1.0], fake targets in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)  # NOTE: computed but never used below
            # --- generator (GAN) step on a fresh random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        # Losses of the last batch only are reported for the epoch.
        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)
        if e % 300 == 0:
            generator.save('./output/gen_model%d.h5' % e)
def train(epochs, batch_size):
    """Train an SRGAN and periodically checkpoint + track PSNR progress.

    Every 300 epochs the generator is saved, reloaded, used to super-resolve
    one training image, and the PSNR against its HR counterpart is appended
    to a progress plot saved under ./model/.

    Relies on module-level data/helpers: x_train_hr, x_train_lr, image_shape,
    Generator, Discriminator, get_gan_network, vgg_loss, loss, load_model,
    denormalize, sewar, plt, np, Adam.

    Args:
        epochs: number of training epochs.
        batch_size: images per training batch.
    """
    psnr_v = []
    epochs_v = []
    downscale_factor = 4
    batch_count = int(x_train_hr.shape[0] / batch_size)
    # Low-resolution input shape fed to the generator.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()
    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):
            # --- discriminator step ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            # --- generator (GAN) step on a fresh random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)
        if e % 300 == 0:
            generator.save('./model/gen_model%d.h5' % e)
            model = load_model('./model/gen_model%d.h5' % e,
                               custom_objects={'vgg_loss': loss.vgg_loss})
            # BUG FIX: the prediction was discarded and an undefined `gen_img`
            # was used afterwards (NameError). Also add the batch dimension that
            # predict() expects for a single image.
            gen_img = model.predict(np.expand_dims(x_train_lr[rand_nums[0]], axis=0))
            # BUG FIX: index [0] so the generated image matches the single HR
            # reference image's shape for the PSNR computation.
            generated_image = denormalize(gen_img)[0]
            psnr_v.append(sewar.psnr(x_train_hr[rand_nums[0]], generated_image, MAX=None))
            epochs_v.append(e)
            # BUG FIX: epochs belong on the x axis (plt.plot takes x first).
            plt.plot(epochs_v, psnr_v)
            # BUG FIX: the original './model' + 'progress%d.png' dropped the
            # path separator, writing './modelprogress<e>.png'.
            plt.savefig('./model/' + 'progress%d.png' % e)
def train(epochs, batch_size):
    """Train an SRGAN, plot sample outputs, checkpoint models, and log losses to CSV.

    Relies on module-level data/helpers: images_hr_train, images_lr_train,
    image_shape_LR, image_shape_HR, Generator, Discriminator, get_gan_network,
    vgg_loss, plot_generated_images, np, csv, Adam.

    Args:
        epochs: number of training epochs.
        batch_size: images per training batch.
    """
    data = []  # per-epoch (epoch, d_loss_real, d_loss_fake, gan_loss) rows for the CSV
    batch_count = int(images_hr_train.shape[0] / batch_size)
    shape = image_shape_LR
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape_HR).discriminator()
    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    gan = get_gan_network(discriminator, shape, generator, adam)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):
            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            #d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            # --- generator (GAN) step on a fresh random batch ---
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        # Last-batch losses are recorded for the epoch.
        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)
        data = data + [(e, d_loss_real, d_loss_fake, loss_gan)]
        if e == 1 or e % 10 == 0:
            plot_generated_images(e, generator)
        if e % 100 == 0:
            generator.save('./output/gen_model%d.h5' % e)
            discriminator.save('./output/dis_model%d.h5' % e)
            gan.save('./output/gan_model%d.h5' % e)
            # NOTE(review): original formatting was collapsed; this CSV rewrite is
            # reconstructed inside the checkpoint branch — 'w' mode rewrites the
            # full accumulated history each time, so it is idempotent either way.
            # Confirm intended placement against the original repository.
            with open('LossData.csv', 'w') as out:
                csv_out = csv.writer(out)
                csv_out.writerow(['epoch', 'Loss HR', 'Loss LR', 'Loss GAN'])
                for row in data:
                    csv_out.writerow(row)
def one_image():
    """Super-resolve a single image with the most recent generator checkpoint.

    Reads the last epoch number from MODEL_DIR, loads the matching
    inception-generator weights, and writes the result via
    Utils.generate_one_image.
    """
    input_shape = (64, 64, 3)
    vgg = VGG_LOSS(input_shape)
    opt = get_optimizer()
    # Most recent checkpoint epoch, as recorded in last_model_epoch.txt.
    epoch_no = Utils.get_last_epoch_number(MODEL_DIR + 'last_model_epoch.txt')
    weights_path = MODEL_DIR + "inception_gen_model" + str(epoch_no) + ".h5"
    model = Generator(input_shape, DOWNSCALE_FACTOR, "inception").generator()
    model.load_weights(weights_path)
    model.compile(loss=vgg.vgg_loss, optimizer=opt)
    Utils.generate_one_image(INPUT_DIR, model, OUTPUT_DIR)
def output_image(dir, x_test_hr_raw):
    """Write the SR output and the LR input of one image to disk.

    Args:
        dir: path to the generator weights file.
        x_test_hr_raw: path to the raw high-resolution source image.

    Side effects: writes ./generated.png (super-resolved result) and
    ./interpolate.png (the downscaled input).
    """
    lr_batch = lr_images([cv2.imread(x_test_hr_raw)], 2)  # make a compressed image
    _, height, width, channels = lr_batch.shape
    _, generator = Generator((height, width, channels)).generator()
    generator.load_weights(dir)
    lr_batch = normalize(lr_batch)
    sr_batch = denormalize(generator.predict(lr_batch))
    # Drop the batch dimension before writing.
    cv2.imwrite('./generated.png', sr_batch[0])
    print('Output generated.png')
    lr_batch = denormalize(lr_batch)
    cv2.imwrite('./interpolate.png', lr_batch[0])
    print('Output interpolate.png')
def plot_image():
    """Super-resolve the image at INPUT_DIR and plot it beside the original.

    Loads the latest inception-generator checkpoint from MODEL_DIR and hands
    both images to Utils.generate_two_plot for the side-by-side figure.
    """
    input_shape = (64, 64, 3)
    vgg = VGG_LOSS(input_shape)
    opt = get_optimizer()
    source = cv2.imread(INPUT_DIR)
    source_norm = Utils.normalize(source)
    epoch_no = Utils.get_last_epoch_number(MODEL_DIR + 'last_model_epoch.txt')
    weights_path = MODEL_DIR + "inception_gen_model" + str(epoch_no) + ".h5"
    model = Generator(input_shape, DOWNSCALE_FACTOR, "inception").generator()
    model.load_weights(weights_path)
    model.compile(loss=vgg.vgg_loss, optimizer=opt)
    # predict() wants a batch axis, so wrap the single image.
    prediction = model.predict(np.expand_dims(source_norm, axis=0))
    sr_image = Utils.denormalize(prediction)
    source = cv2.cvtColor(source, cv2.COLOR_BGR2RGB)
    Utils.generate_two_plot(sr_image, source, OUTPUT_DIR)
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio):
    """Train an SRGAN on .png data, log losses to a text file, save weights.

    Relies on module-level helpers: Utils, Utils_model, VGG_LOSS, Generator,
    Discriminator, get_gan_network, image_shape, downscale_factor, np, tqdm.

    Args:
        epochs: number of training epochs.
        batch_size: images per training batch.
        input_dir: directory containing training images.
        output_dir: directory for generated sample plots.
        model_save_dir: directory for loss log and weight checkpoints.
        number_of_images: how many images to load.
        train_test_ratio: train/test split fraction.
    """
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(
        input_dir, '.png', number_of_images, train_test_ratio)
    loss = VGG_LOSS(image_shape)
    batch_count = int(x_train_hr.shape[0] / batch_size)
    # Low-resolution generator input shape.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    # Not good
    generator, _ = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()
    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    # Truncate (or create) the loss log before training starts.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            # --- generator (GAN) step on a fresh random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        # Last-batch losses are what get printed/logged for the epoch.
        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n'
                        % (e, gan_loss, discriminator_loss))
        loss_file.close()
        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr, x_test_lr)
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            # generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
            # exit()
        if e % 500 == 0:
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, image_extension):
    """Train two SRGANs side by side: a plain generator and a 'complex' one.

    Each generator has its own discriminator and combined GAN; both are
    stepped on the same batches so their losses are directly comparable.

    Relies on module-level helpers: Utils, Utils_model, VGG_LOSS, Generator,
    complex_Generator, Discriminator, get_gan_network, image_shape,
    downscale_factor, np, tqdm.
    """
    # Loading images
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape,
                                 number_of_images, train_test_ratio)
    print('======= Loading VGG_loss ========')
    # Loading VGG loss
    loss = VGG_LOSS(image_shape)
    loss2 = VGG_LOSS(image_shape)
    print('====== VGG_LOSS =======', loss)
    batch_count = int(x_train_hr.shape[0] / batch_size)
    print('====== Batch_count =======', batch_count)
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    print('====== Shape =======', shape)
    # Generator description
    generator = Generator(shape).generator()
    complex_generator = complex_Generator(shape).generator()
    # Discriminator description
    discriminator = Discriminator(image_shape).discriminator()
    discriminator2 = Discriminator(image_shape).discriminator()
    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    complex_generator.compile(loss=loss2.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    discriminator2.compile(loss="binary_crossentropy", optimizer=optimizer)
    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    complex_gan = get_gan_network(discriminator2, shape, complex_generator, optimizer, loss2.vgg_loss)
    # Truncate (or create) the loss log before training starts.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator steps (both discriminators, same batch) ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            generated_images_csr = complex_generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            discriminator2.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            d_loss_creal = discriminator2.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_cfake = discriminator2.train_on_batch(generated_images_csr, fake_data_Y)
            discriminator_c_loss = 0.5 * np.add(d_loss_cfake, d_loss_creal)
            ########
            # --- generator (GAN) steps on a fresh random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            discriminator2.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
            gan_c_loss = complex_gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        # Only the plain GAN's losses are written to the log file.
        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        print("gan_c_loss :", gan_c_loss)
        gan_loss = str(gan_loss)
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n'
                        % (e, gan_loss, discriminator_loss))
        loss_file.close()
        if e % 1 == 0:  # i.e. every epoch
            Utils.plot_generated_images(output_dir, e, generator, complex_generator, x_test_hr, x_test_lr)
        if e % 50 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
def train(epochs, batch_size, input_dir, tgt_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, saved_model):
    """Fine-tune a previously saved generator with new Conv3D head layers.

    Loads `saved_model`, takes the output of its second-to-last layer, stacks
    three new Conv3D layers on top, freezes all but the last 4 layers, and
    trains the result as a plain regressor (no discriminator/GAN here).

    Relies on module-level helpers: Utils, Utils_model, VGG_LOSS, Generator,
    load_model, Model, Conv3D, image_shape, np, tqdm.
    """
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(
        input_dir, tgt_dir, '.npy', number_of_images, train_test_ratio)
    # x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(input_dir, '.jpg', number_of_images, train_test_ratio)
    # x_train_hr = np.expand_dims(x_train_hr, axis=3)
    # x_test_hr = np.expand_dims(x_test_hr, axis=3)
    # x_train_hr = np.reshape(x_train_hr,(x_train_hr[0], x_train_hr[1], x_train_hr[2], 1))
    # x_test_hr = np.reshape(x_test_hr, (x_test_hr[0], x_test_hr[1], x_test_hr[2], 1))
    loss = VGG_LOSS(image_shape)
    batch_count = int(x_train_hr.shape[0] / batch_size)
    # 4-D sample shape (volumetric data — hence Conv3D below).
    shape = (image_shape[0], image_shape[1], image_shape[2], image_shape[3])
    # # generator = Generator(shape).generator()
    # # model = squeeze(Activation('tanh')(model), 4)
    # # # discriminator = Discriminator(dis_shape).discriminator()
    generator = Generator(shape).generator()  # NOTE: overwritten by the Model(...) below
    generator_old = load_model(saved_model, custom_objects={'vgg_loss': loss.vgg_loss})
    # Branch off just before the old model's final layer.
    x_tmp = generator_old.layers[-2].output
    # generator_old.layers.pop()
    # generator_old.layers.pop()
    # for layers in generator_old.layers:
    #     layers.trainable = False
    # define new layrs
    # x1 = Conv2D(filters = 64, kernel_size = 3, strides = 1, padding = "same")
    # x2 = Conv2D(filters=1, kernel_size=5, strides=1, padding="same")
    # x3 = Activation('tanh')
    # generator = Concatenate()[generator_old, x1,x2,x3]
    # gan_input = Input(shape=shape)
    # x_tmp = mid_out#generator_old(gan_input)
    # x_tmp = Conv3D(filters=64, kernel_size=9, strides=1, padding="same", name="TransConv2d_1")(x_tmp)
    x_tmp = Conv3D(filters=128, kernel_size=5, strides=1, padding="same", name="TransConv2d_2")(x_tmp)
    x_tmp = Conv3D(filters=32, kernel_size=3, strides=1, padding="same", name="TransConv2d_3")(x_tmp)
    x_tmp = Conv3D(filters=1, kernel_size=1, strides=1, padding="same", name="TransConv2d_4")(x_tmp)
    # NOTE(review): the original collapsed text reads "# gan_output = " followed by
    # "Activation('tanh')(x_tmp)"; since the Model below uses x_tmp directly, the
    # activation is treated as part of the commented-out line — confirm upstream.
    # gan_output = Activation('tanh')(x_tmp)
    generator = Model(inputs=generator_old.input, outputs=x_tmp)
    # fine tune the layers.
    for layers in generator.layers[:-4]:
        layers.trainable = False
    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    # discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    # gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    # Truncate (or create) the loss log before training starts.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_loss = generator.train_on_batch(image_batch_lr, image_batch_hr)
        # print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : Resnet_loss = %s ; \n' % (e, gan_loss))
        loss_file.close()
        if e == 1 or e % 5 == 0:
            # Evaluate on a random test batch and log the loss.
            rand_nums = np.random.randint(0, x_test_hr.shape[0], size=batch_size)
            image_batch_hr = x_test_hr[rand_nums]
            image_batch_lr = x_test_lr[rand_nums]
            test_loss = generator.test_on_batch(image_batch_lr, image_batch_hr)
            print("test_loss :", test_loss)
            test_loss = str(test_loss)
            loss_file = open(model_save_dir + 'test_losses.txt', 'a')
            loss_file.write('epoch%d : test_loss = %s ; \n' % (e, test_loss))
            loss_file.close()
        if e % 50 == 0:
            generator.save(model_save_dir + 'Resnet_model%d.h5' % e)
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, image_extension):
    """Train an SRGAN on 32x32 patches extracted from the loaded images.

    Relies on module-level helpers: Utils, Utils_model, VGG_LOSS, Generator,
    Discriminator, get_gan_network, extract_2d, image_shape, downscale_factor,
    np, tqdm.

    Args:
        epochs: number of training epochs.
        batch_size: patches per training batch.
        input_dir: directory containing training images.
        output_dir: directory for generated sample plots.
        model_save_dir: directory for loss log and checkpoints.
        number_of_images: how many images to load.
        train_test_ratio: train/test split fraction.
        image_extension: file extension passed to the loader.
    """
    # Loading images
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape,
                                 number_of_images, train_test_ratio)
    print('======= Loading VGG_loss ========')
    loss = VGG_LOSS(image_shape)
    print('====== VGG_LOSS =======', loss)
    # NOTE(review): batch_count is computed from the pre-patch image count; the
    # patch extraction below changes x_train_hr.shape[0], so this undercounts
    # batches per epoch — confirm intended (a commented-out patch-based count
    # existed upstream).
    batch_count = int(x_train_hr.shape[0] / batch_size)
    print('====== Batch_count =======', batch_count)
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    print('====== Shape =======', shape)
    # Generator description
    generator = Generator(shape).generator()
    # Discriminator description
    discriminator = Discriminator(image_shape).discriminator()
    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    # Truncate (or create) the loss log before training starts.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()
    # Replace the full images with 32x32 patches for training.
    # BUG FIX: the HR line used a single-element tuple unpack
    # (`this_hr_patch, = extract_2d(...)`), inconsistent with the LR line and
    # failing for any non-length-1 return value.
    this_hr_patch = extract_2d(x_train_hr, (32, 32))
    this_lr_patch = extract_2d(x_train_lr, (32, 32))
    x_train_lr = this_lr_patch
    x_train_hr = this_hr_patch
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator step on a random patch batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            # --- generator (GAN) step on a fresh random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n'
                        % (e, gan_loss, discriminator_loss))
        loss_file.close()
        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr, x_test_lr)
        if e % 200 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
# NOTE(review): the three plt.* statements below are the tail of a plotting
# function whose `def` line (and the `epoch` variable it references) is not
# visible in this chunk.
plt.axis('off')
plt.tight_layout()
plt.savefig('output/gan_generated_image_epoch_%d.png' % epoch)


def train(epochs=1, batch_size=128):
    """Train an SRGAN (basic variant with loss history collection).

    NOTE(review): this definition is TRUNCATED in the source — the body ends
    mid-way through the first batch step; the remainder is not visible here.
    """
    losses_to_file = []  # accumulated losses (never written in the visible part)
    downscale_factor = 4
    batch_count = int(x_train_hr.shape[0] / batch_size)
    # Low-resolution generator input shape.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()
    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    # Rebuilt with a hard-coded 3 channels for the combined GAN input.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
def train(epochs, batch_size, input_dir, tgt_dir, output_dir, model_save_dir, number_of_images, train_test_ratio): x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data( input_dir, tgt_dir, '.npy', number_of_images, train_test_ratio) # x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(input_dir, '.jpg', number_of_images, train_test_ratio) # x_train_hr = np.expand_dims(x_train_hr, axis=3) # x_test_hr = np.expand_dims(x_test_hr, axis=3) # x_train_hr = np.reshape(x_train_hr,(x_train_hr[0], x_train_hr[1], x_train_hr[2], 1)) # x_test_hr = np.reshape(x_test_hr, (x_test_hr[0], x_test_hr[1], x_test_hr[2], 1)) loss = VGG_LOSS(image_shape) batch_count = int(x_train_hr.shape[0] / batch_size) shape = (image_shape[0], image_shape[1], image_shape[2], image_shape[3]) generator = Generator(shape).generator() # model = squeeze(Activation('tanh')(model), 4) # discriminator = Discriminator(dis_shape).discriminator() optimizer = Utils_model.get_optimizer() generator.compile(loss=loss.vgg_loss, optimizer=optimizer) # discriminator.compile(loss="binary_crossentropy", optimizer=optimizer) # gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss) loss_file = open(model_save_dir + 'losses.txt', 'w+') loss_file.close() for e in range(1, epochs + 1): print('-' * 15, 'Epoch %d' % e, '-' * 15) for _ in tqdm(range(batch_count)): rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size) image_batch_hr = x_train_hr[rand_nums] image_batch_lr = x_train_lr[rand_nums] generated_images_sr = generator.predict(image_batch_lr) # real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2 # fake_data_Y = np.random.random_sample(batch_size)*0.2 # # discriminator.trainable = True # d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y) # d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y) # discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real) # # rand_nums = 
np.random.randint(0, x_train_hr.shape[0], size=batch_size) # image_batch_hr = x_train_hr[rand_nums] # image_batch_lr = x_train_lr[rand_nums] # # gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2 # discriminator.trainable = False gan_loss = generator.train_on_batch(image_batch_lr, image_batch_hr) # print("discriminator_loss : %f" % discriminator_loss) print("gan_loss :", gan_loss) gan_loss = str(gan_loss) loss_file = open(model_save_dir + 'losses.txt', 'a') loss_file.write('epoch%d : gan_loss = %s ; \n' % (e, gan_loss)) loss_file.close() if e == 1 or e % 5 == 0: flag = 1 # Utils.plot_generated_images(output_dir, e, generator, x_test_hr, x_test_lr) if e % 500 == 0: generator.save(model_save_dir + 'gen_model%d.h5' % e)
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, resume_train, downscale_factor, arch):
    """Train an SRGAN with optional resume from the last saved checkpoint.

    Relies on module-level helpers: tf, set_session, Utils, Utils_model,
    VGG_LOSS, Generator, Discriminator, gan_network, IMAGE_SHAPE,
    EPOCHS_CHECKPOINT, np, tqdm.

    Args:
        resume_train: if True, load the generator/discriminator weights for
            the epoch recorded in last_model_epoch.txt and continue from it.
        arch: architecture tag used in checkpoint file names.
    """
    # Allow TF to grow GPU memory on demand instead of grabbing it all upfront.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
    hr_images, hr_label, lr_images, lr_label = Utils.load_training_data(
        input_dir, number_of_images)
    print(hr_images)
    loss = VGG_LOSS(IMAGE_SHAPE)
    lr_shape = (IMAGE_SHAPE[0] // downscale_factor,
                IMAGE_SHAPE[1] // downscale_factor, IMAGE_SHAPE[2])
    print(lr_shape)
    generator = Generator(lr_shape, downscale_factor, arch).generator()
    discriminator = Discriminator(IMAGE_SHAPE).discriminator()
    optimizer = Utils_model.get_optimizer()
    if (resume_train == True):
        last_epoch_number = Utils.get_last_epoch_number(model_save_dir + 'last_model_epoch.txt')
        gen_model = model_save_dir + arch + "_gen_model" + str(last_epoch_number) + ".h5"
        dis_model = model_save_dir + arch + "_dis_model" + str(last_epoch_number) + ".h5"
        generator.load_weights(gen_model)
        discriminator.load_weights(dis_model)
    else:
        last_epoch_number = 1
    # NOTE(review): original formatting was collapsed; the compile calls are
    # reconstructed at function level so the resume path also gets compiled
    # models — confirm against the original repository.
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    gan = gan_network(discriminator, lr_shape, generator, optimizer, loss.vgg_loss)
    for e in range(last_epoch_number, last_epoch_number + epochs):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(1)):  # a single batch per epoch
            rand_nums = np.random.randint(0, hr_images.shape[0], size=batch_size)
            image_batch_hr = hr_images[rand_nums]
            image_batch_lr = lr_images[rand_nums]
            # video_images = lr_images[0]
            generated_images = generator.predict(image_batch_lr)  # array of generated images
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data = np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = True
            discriminator_loss_real = discriminator.train_on_batch(image_batch_hr, real_data)
            discriminator_loss_fake = discriminator.train_on_batch(generated_images, fake_data)
            discriminator_loss = 0.5 * np.add(
                discriminator_loss_fake, discriminator_loss_real)  # Mean Of Discriminator Loss
            # NOTE(review): rand_nums is re-sampled here but the image batches are
            # NOT re-indexed, so the GAN step reuses the discriminator's batch and
            # this sample is dead code — confirm intended.
            rand_nums = np.random.randint(0, hr_images.shape[0], size=batch_size)
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, real_data])
        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss : ", gan_loss)
        gan_loss = str(gan_loss)
        # generated_video_image = generator.predict(np.expand_dims(video_images,axis=0))
        Utils.save_losses_file(model_save_dir, e, gan_loss, discriminator_loss, arch + '_losses.txt')
        # image_array.append(cv2.cvtColor(denormalize(generated_video_image[0]),cv2.COLOR_BGR2RGB))
        if e % EPOCHS_CHECKPOINT == 0:
            # Record the latest checkpoint epoch so resume_train can find it.
            Utils.save_losses_file(model_save_dir, e, gan_loss, discriminator_loss, 'last_model_epoch.txt')
            generator.save(model_save_dir + arch + '_gen_model%d.h5' % e)
            discriminator.save(model_save_dir + arch + '_dis_model%d.h5' % e)
def train(img_shape, epochs, batch_size, rescaling_factor, input_dirs, output_dir, model_save_dir, train_test_ratio, gpu=1):
    """Pre-train the SRResNet generator alone (content loss, no GAN).

    Streams (LR, HR) batches from a directory-based generator, optionally wraps
    the model with multi_gpu_model, and checkpoints every 5 epochs.

    Relies on module-level helpers: create_data_generator,
    rescale_imgs_to_neg1_1, VGG_LOSS, Generator, get_model_memory_usage,
    Utils_model, multi_gpu_model, image_shape, os, cv2, tqdm.

    Args:
        img_shape: HR image shape (h, w, c).
        epochs: number of training epochs.
        batch_size: images per batch.
        rescaling_factor: HR→LR downscale factor.
        input_dirs: (hr_dir, lr_dir)-style pair of data directories.
        output_dir: directory for test images (currently disabled).
        model_save_dir: directory for generator checkpoints.
        train_test_ratio: validation split fraction.
        gpu: number of GPUs; >1 attempts multi_gpu_model.
    """
    lr_shape = (img_shape[0] // rescaling_factor,
                img_shape[1] // rescaling_factor, img_shape[2])
    img_train_gen, img_test_gen = create_data_generator(
        input_dirs[1], input_dirs[0],
        target_size_lr=(lr_shape[0], lr_shape[1]),
        target_size_hr=(img_shape[0], img_shape[1]),
        preproc_lr=rescale_imgs_to_neg1_1,
        preproc_hr=rescale_imgs_to_neg1_1,
        validation_split=train_test_ratio,
        batch_size=batch_size)
    # Batches per epoch = training-split share of the files in the data dir.
    batch_count = int(
        (len(os.listdir(os.path.join(input_dirs[1], 'ignore'))) / batch_size)
        * (1 - train_test_ratio))
    test_image = []
    for img in sorted(os.listdir(os.path.join(input_dirs[1], 'ignore'))):
        if 'niklas_city_0009' in img:
            test_image.append(
                rescale_imgs_to_neg1_1(
                    cv2.imread(os.path.join(input_dirs[1], 'ignore', img))))
    print("test length: ", len(test_image))
    # NOTE(review): uses the module-level image_shape, not the img_shape
    # parameter — confirm intended.
    loss = VGG_LOSS(image_shape)
    generator = Generator(lr_shape, rescaling_factor).generator()
    print('memory usage generator: ',
          get_model_memory_usage(batch_size, generator))
    optimizer = Utils_model.get_optimizer()
    if gpu > 1:
        try:
            print("multi_gpu_model generator")
            par_generator = multi_gpu_model(generator, gpus=2)
        except:
            par_generator = generator
            print("single_gpu_model generator")
    else:
        par_generator = generator
        print("single_gpu_model generator")
    par_generator.compile(loss=loss.loss, optimizer=optimizer)
    par_generator.summary()
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for i in tqdm(range(batch_count)):
            batch = next(img_train_gen)
            image_batch_hr = batch[1]
            image_batch_lr = batch[0]
            # multi_gpu_model chokes on short final batches, so skip them.
            if image_batch_hr.shape[0] == batch_size and image_batch_lr.shape[0] == batch_size:
                g_loss = par_generator.train_on_batch(image_batch_lr, image_batch_hr)
            else:
                print("weird multi_gpu_model batch error dis: ")
                print("hr batch shape: ", image_batch_hr.shape)
                print("lr batch shape: ", image_batch_lr.shape)
        # if e == 1 or e % 5 == 0:
        #     Utils.generate_test_image(output_dir, e, generator, test_image)
        if e % 5 == 0:
            # Per-epoch snapshot plus a rolling "latest" copy.
            generator.save(os.path.join(model_save_dir, 'srresnet%d.h5' % e))
            # BUG FIX: the original applied % e to a format-free string
            # ('srresnet.h5' % e), raising TypeError on every 5th epoch.
            generator.save(os.path.join(model_save_dir, 'srresnet.h5'))
def train(img_shape, epochs, batch_size, rescaling_factor, input_dirs, output_dir, model_save_dir, train_test_ratio):
    """Train an SRGAN with optional dual-GPU data parallelism.

    Streams (LR, HR) batches from a directory-based generator, wraps the
    models with multi_gpu_model when possible, drops the learning rate at
    epoch 100, logs losses per epoch, and checkpoints every 5 epochs.

    Relies on module-level helpers: create_data_generator,
    rescale_imgs_to_neg1_1, VGG_LOSS, Generator, Discriminator,
    get_model_memory_usage, Utils_model, multi_gpu_model, get_gan_network,
    Utils, image_shape, os, cv2, np, tqdm.
    """
    lr_shape = (img_shape[0] // rescaling_factor,
                img_shape[1] // rescaling_factor, img_shape[2])
    img_train_gen, img_test_gen = create_data_generator(
        input_dirs[1], input_dirs[0],
        target_size_lr=(lr_shape[0], lr_shape[1]),
        target_size_hr=(img_shape[0], img_shape[1]),
        preproc_lr=rescale_imgs_to_neg1_1,
        preproc_hr=rescale_imgs_to_neg1_1,
        validation_split=train_test_ratio,
        batch_size=batch_size)
    # NOTE(review): uses the module-level image_shape, not the img_shape
    # parameter — confirm intended.
    loss = VGG_LOSS(image_shape)
    # Batches per epoch = training-split share of the files in the data dir.
    batch_count = int(
        (len(os.listdir(os.path.join(input_dirs[1], 'ignore'))) / batch_size)
        * (1 - train_test_ratio))
    test_image = []
    for img in sorted(os.listdir(os.path.join(input_dirs[1], 'ignore'))):
        if 'niklas_city_0009' in img:
            test_image.append(
                rescale_imgs_to_neg1_1(
                    cv2.imread(os.path.join(input_dirs[1], 'ignore', img))))
    print("test length: ", len(test_image))
    generator = Generator(lr_shape, rescaling_factor).generator()
    discriminator = Discriminator(img_shape).discriminator()
    print('memory usage generator: ',
          get_model_memory_usage(batch_size, generator))
    print('memory usage discriminator: ',
          get_model_memory_usage(batch_size, discriminator))
    optimizer = Utils_model.get_optimizer()
    try:
        print("multi_gpu_model generator")
        par_generator = multi_gpu_model(generator, gpus=2)
    except:
        par_generator = generator
        print("single_gpu_model generator")
    try:
        print("multi_gpu_model discriminator")
        par_discriminator = multi_gpu_model(discriminator, gpus=2)
    except:
        par_discriminator = discriminator
        print("single_gpu_model discriminator")
    par_generator.compile(loss=loss.loss, optimizer=optimizer)
    par_discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    gan, par_gan = get_gan_network(par_discriminator, lr_shape, par_generator,
                                   optimizer, loss.loss, batch_size)
    par_discriminator.summary()
    par_generator.summary()
    par_gan.summary()
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        if e == 100:
            # Learning-rate drop after the warm-up phase.
            optimizer.lr = 1e-5
        for i in tqdm(range(batch_count)):
            # --- discriminator step ---
            batch = next(img_train_gen)
            image_batch_hr = batch[1]
            image_batch_lr = batch[0]
            generated_images_sr = generator.predict(image_batch_lr)
            # Label smoothing: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - \
                np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2
            par_discriminator.trainable = True
            if image_batch_hr.shape[0] == batch_size and image_batch_lr.shape[0] == batch_size:
                d_loss_real = par_discriminator.train_on_batch(
                    image_batch_hr, real_data_Y)
                d_loss_fake = par_discriminator.train_on_batch(
                    generated_images_sr, fake_data_Y)
                discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            else:
                print("weird multi_gpu_model batch error dis: ")
                print("hr batch shape: ", image_batch_hr.shape)
                print("lr batch shape: ", image_batch_lr.shape)
                # BUG FIX: the original also printed gan_Y.shape here, but
                # gan_Y is not defined until the GAN step below, which raised
                # UnboundLocalError on the first short batch.
            # --- generator (GAN) step on a fresh batch ---
            batch = next(img_train_gen)
            image_batch_hr = batch[1]
            image_batch_lr = batch[0]
            gan_Y = np.ones(batch_size) - \
                np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            if image_batch_hr.shape[0] == batch_size and image_batch_lr.shape[0] == batch_size:
                gan_loss = par_gan.train_on_batch(image_batch_lr,
                                                  [image_batch_hr, gan_Y])
            else:
                print("weird multi_gpu_model batch error gan: ")
                print("hr batch shape: ", image_batch_hr.shape)
                print("lr batch shape: ", image_batch_lr.shape)
                print("gan y shape: ", gan_Y.shape)
        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)
        # BUG FIX: the file was created above as 'losses.txt' but appended to
        # as '_losses.txt'; both now use the same name so the log actually
        # accumulates in the file that was initialized.
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n'
                        % (e, gan_loss, discriminator_loss))
        loss_file.close()
        if e == 1 or e % 5 == 0:
            Utils.generate_test_image(output_dir, e, generator, test_image)
        if e % 5 == 0:
            generator.save(os.path.join(model_save_dir, 'gen_model%d.h5' % e))
            discriminator.save(
                os.path.join(model_save_dir, 'dis_model%d.h5' % e))
# --- PyTorch evaluation setup (script-level; `args`, `createDataset`,
# `Generator`, `torch`, `transforms` are defined earlier in the file) ---
# Build the test split and a one-image-at-a-time loader.
_, testSet = createDataset(args.img_list, args.img_dir, 1)
testloader = torch.utils.data.DataLoader(testSet, batch_size=1, shuffle=False,
                                         num_workers=1, pin_memory=True)
print('Dataset initialized')
print(args.c)
# args.c selects CPU mode; otherwise run on CUDA.
if not (args.c):
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
if args.c:
    # CPU path: plain module, checkpoint remapped to CPU.
    G = Generator(noise_dim=64, num_classes=100)
    G.load_state_dict(
        torch.load(args.model, map_location=torch.device('cpu')))
else:
    # GPU path: DataParallel wrapper; load weights into the wrapped module.
    G = torch.nn.DataParallel(Generator(noise_dim=64, num_classes=100)).to(device)
    G.module.load_state_dict(torch.load(args.model))
print('Network created')
print('Finished loading checkpoints')
# Inference mode (disables dropout/batch-norm updates).
G.eval()
img_list = list()
toPIL = transforms.ToPILImage()