Exemple #1
0
def train(epochs, batch_size):
    """Run the SRGAN training loop for ``epochs`` epochs.

    Relies on module-level globals: ``x_train_hr`` / ``x_train_lr`` (paired
    HR/LR training images), ``image_shape``, ``Generator``, ``Discriminator``,
    ``get_gan_network`` and ``vgg_loss``.  Saves generator weights to
    ``./output`` every 300 epochs.
    """
    scale = 4

    # Mini-batches per epoch (partial final batch is dropped).
    steps_per_epoch = int(x_train_hr.shape[0] / batch_size)
    lr_shape = (image_shape[0] // scale,
                image_shape[1] // scale, image_shape[2])

    generator = Generator(lr_shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)

    gan_input_shape = (image_shape[0] // scale,
                       image_shape[1] // scale, 3)
    gan = get_gan_network(discriminator, gan_input_shape, generator, adam)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(steps_per_epoch):
            # --- discriminator step on a random batch ---
            idx = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            hr_batch = x_train_hr[idx]
            lr_batch = x_train_lr[idx]
            sr_batch = generator.predict(lr_batch)

            # Smoothed labels: real targets in (0.8, 1.0], fake in [0, 0.2).
            real_labels = np.ones(batch_size) \
                - np.random.random_sample(batch_size) * 0.2
            fake_labels = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(hr_batch, real_labels)
            d_loss_fake = discriminator.train_on_batch(sr_batch, fake_labels)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            # --- generator (GAN) step on a fresh random batch ---
            idx = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            hr_batch = x_train_hr[idx]
            lr_batch = x_train_lr[idx]

            gan_labels = np.ones(batch_size) \
                - np.random.random_sample(batch_size) * 0.2
            # Freeze the discriminator while the combined model trains.
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(lr_batch, [hr_batch, gan_labels])

        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)

        if e % 300 == 0:
            generator.save('./output/gen_model%d.h5' % e)
Exemple #2
0
def train(epochs, batch_size):
    """Train the SRGAN and track PSNR of a sample reconstruction.

    Every 300 epochs the generator is saved to ``./model``, reloaded, used to
    super-resolve one sample image, and the resulting PSNR is appended to a
    progress plot saved next to the models.

    Relies on module-level globals: ``x_train_hr`` / ``x_train_lr``,
    ``image_shape``, ``Generator``, ``Discriminator``, ``get_gan_network``,
    ``vgg_loss``, ``load_model``, ``denormalize``, ``sewar``, ``plt``, ``loss``.
    """
    psnr_v = []
    epochs_v = []

    downscale_factor = 4

    batch_count = int(x_train_hr.shape[0] / batch_size)
    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2])

    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)

    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, 3)
    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in range(batch_count):
            # --- discriminator step ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) \
                - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True
            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            # --- generator (GAN) step on a fresh batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(batch_size) \
                - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)

        if e % 300 == 0:
            generator.save('./model/gen_model%d.h5' % e)
            model = load_model('./model/gen_model%d.h5' % e,
                               custom_objects={'vgg_loss': loss.vgg_loss})
            # BUG FIX: the original discarded predict()'s return value and
            # then referenced the undefined name `gen_img` (NameError).  Also
            # add the batch axis predict() expects and strip it afterwards so
            # the PSNR comparison is image-vs-image.
            # `rand_nums` here is whatever the last inner-loop iteration drew.
            gen_img = model.predict(
                np.expand_dims(x_train_lr[rand_nums[0]], axis=0))[0]
            generated_image = denormalize(gen_img)
            psnr_v.append(sewar.psnr(x_train_hr[rand_nums[0]],
                                     generated_image, MAX=None))
            epochs_v.append(e)
            # BUG FIX: axes were swapped (epochs belong on x) and the figure
            # was never cleared, so checkpoint curves piled on top of each
            # other.
            plt.clf()
            plt.plot(epochs_v, psnr_v)
            # BUG FIX: missing '/' produced files named './modelprogressN.png'
            # instead of saving inside the ./model directory.
            plt.savefig('./model/progress%d.png' % e)
Exemple #3
0
def train(epochs, batch_size):
    """Train the SRGAN and log per-batch losses to ``LossData.csv``.

    Relies on module-level globals: ``images_hr_train`` / ``images_lr_train``
    (paired HR/LR training images), ``image_shape_LR`` / ``image_shape_HR``,
    ``Generator``, ``Discriminator``, ``get_gan_network``, ``vgg_loss``,
    ``plot_generated_images`` and ``csv``.  Saves generator, discriminator
    and combined-GAN models to ``./output`` every 100 epochs.
    """
    # Accumulates (epoch, d_loss_real, d_loss_fake, loss_gan) rows for the CSV.
    data= []
    batch_count = int(images_hr_train.shape[0] / batch_size)
    shape = image_shape_LR
 
    generator = Generator(shape).generator()
    discriminator = Discriminator(image_shape_HR).discriminator()

    adam = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    # Generator trains against a perceptual (VGG-feature) loss.
    generator.compile(loss=vgg_loss, optimizer=adam)
    discriminator.compile(loss="binary_crossentropy", optimizer=adam)
    

    gan = get_gan_network(discriminator, shape, generator, adam)

    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batch_count):
            
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]

            generated_images_sr = generator.predict(image_batch_lr)

            # Smoothed labels: real targets in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            fake_data_Y = np.random.random_sample(batch_size)*0.2
            
            # Unfreeze the discriminator for its own update...
            discriminator.trainable = True
            
            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            #d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
          
            # ...then freeze it and train the generator through the combined
            # model on a freshly sampled batch.
            rand_nums = np.random.randint(0, images_hr_train.shape[0], size=batch_size)
            image_batch_hr = images_hr_train[rand_nums]
            image_batch_lr = images_lr_train[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr,gan_Y])
            print("Loss HR , Loss LR, Loss GAN")
            print(d_loss_real, d_loss_fake, loss_gan) 
            data= data + [(e,d_loss_real, d_loss_fake, loss_gan)]
        if e == 1 or e % 10 == 0:
            plot_generated_images(e, generator)
        if e % 100 == 0:
            generator.save('./output/gen_model%d.h5' % e)
            discriminator.save('./output/dis_model%d.h5' % e)
            gan.save('./output/gan_model%d.h5' % e)
        # NOTE(review): the whole CSV is rewritten from scratch every epoch
        # (mode 'w'), so the file always holds the complete history so far.
        with open('LossData.csv','w') as out:
            csv_out=csv.writer(out)
            csv_out.writerow(['epoch','Loss HR' , 'Loss LR', 'Loss GAN'])
            for row in data:
                csv_out.writerow(row)
Exemple #4
0
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio):
    """Train an SRGAN on PNG images loaded from ``input_dir``.

    Args:
        epochs: number of training epochs.
        batch_size: mini-batch size.
        input_dir: directory containing the '.png' training images.
        output_dir: directory where sample plots are written.
        model_save_dir: directory for the loss log and weight checkpoints.
        number_of_images: how many images to load.
        train_test_ratio: train/test split ratio passed to the loader.

    Relies on module-level globals: ``image_shape``, ``downscale_factor``,
    ``Utils``, ``Utils_model``, ``VGG_LOSS``, ``Generator``, ``Discriminator``,
    ``get_gan_network``, ``tqdm``.
    """
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = Utils.load_training_data(
        input_dir, '.png', number_of_images, train_test_ratio)
    # Perceptual (VGG-feature) loss used for the generator.
    loss = VGG_LOSS(image_shape)

    batch_count = int(x_train_hr.shape[0] / batch_size)
    # Low-res input shape for the generator.
    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2])  # Not good

    # NOTE(review): this Generator variant returns a tuple; only the model is
    # kept — confirm what the second element is.
    generator, _ = Generator(shape).generator()
    discriminator = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()
    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer,
                          loss.vgg_loss)

    # Truncate the loss log at the start of a run.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            # --- generator (GAN) step on a fresh batch, discriminator frozen ---
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)

        # Append this epoch's (last-batch) losses to the log.
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' %
                        (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr,
                                        x_test_lr)
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            # generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
            # exit()
        if e % 500 == 0:
            # generator.save(model_save_dir + 'gen_model%d.h5' % e)
            generator.save_weights(model_save_dir + 'gen_w%d.h5' % e)
Exemple #5
0
def train(epochs, batch_size, input_dir, output_dir, model_save_dir, number_of_images, train_test_ratio, image_extension):
    """Train two SRGAN variants side by side: a plain generator and a
    'complex' generator, each with its own discriminator and combined GAN.

    Both pipelines share the optimizer instance and are fed identical batches,
    so their losses are directly comparable.  Relies on module-level globals:
    ``image_shape``, ``downscale_factor``, ``Utils``, ``Utils_model``,
    ``VGG_LOSS``, ``Generator``, ``complex_Generator``, ``Discriminator``,
    ``get_gan_network``, ``tqdm``.
    """
    # Loading images
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape, number_of_images, train_test_ratio)

    print('======= Loading VGG_loss ========')
    # Loading VGG loss — one instance per pipeline.
    loss = VGG_LOSS(image_shape)
    loss2 = VGG_LOSS(image_shape)
    print('====== VGG_LOSS =======', loss)

    batch_count = int(x_train_hr.shape[0] / batch_size)
    print('====== Batch_count =======', batch_count)

    # Low-res input shape for both generators.
    shape = (image_shape[0] // downscale_factor, image_shape[1] // downscale_factor, image_shape[2])
    print('====== Shape =======', shape)

    # Generator description
    generator = Generator(shape).generator()
    complex_generator = complex_Generator(shape).generator()
    # Discriminator description — one per pipeline so neither sees the
    # other's fake images.
    discriminator = Discriminator(image_shape).discriminator()
    discriminator2 = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    complex_generator.compile(loss=loss2.vgg_loss, optimizer=optimizer)

    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)
    discriminator2.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer, loss.vgg_loss)
    complex_gan = get_gan_network(discriminator2, shape, complex_generator, optimizer, loss2.vgg_loss)

    # Truncate the loss log at the start of a run.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')

    loss_file.close()

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator steps (both pipelines, same batch) ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)
            generated_images_csr = complex_generator.predict(image_batch_lr)
            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True
            discriminator2.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            d_loss_creal = discriminator2.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_cfake = discriminator2.train_on_batch(generated_images_csr, fake_data_Y)
            discriminator_c_loss = 0.5 * np.add(d_loss_cfake, d_loss_creal)
            ########
            # --- generator (GAN) steps on a fresh batch, discriminators frozen ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            discriminator2.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])
            gan_c_loss = complex_gan.train_on_batch(image_batch_lr, [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        print("gan_c_loss :", gan_c_loss)
        gan_loss = str(gan_loss)

        # Append this epoch's (last-batch) losses to the log.
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' % (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e % 1 == 0:
            Utils.plot_generated_images(output_dir, e, generator,complex_generator, x_test_hr, x_test_lr)
        if e % 50 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
Exemple #6
0
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio, image_extension):
    """Train an SRGAN on 32x32 patches extracted from the loaded images.

    Args:
        epochs: number of training epochs.
        batch_size: mini-batch size.
        input_dir: directory containing the training images.
        output_dir: directory where sample plots are written.
        model_save_dir: directory for the loss log and model checkpoints.
        number_of_images: how many images to load.
        train_test_ratio: train/test split ratio passed to the loader.
        image_extension: file extension filter for the loader.

    Relies on module-level globals: ``image_shape``, ``downscale_factor``,
    ``Utils``, ``Utils_model``, ``VGG_LOSS``, ``Generator``, ``Discriminator``,
    ``get_gan_network``, ``extract_2d``, ``tqdm``.
    """
    # Loading images
    x_train_lr, x_train_hr, x_test_lr, x_test_hr = \
        Utils.load_training_data(input_dir, image_extension, image_shape, number_of_images, train_test_ratio)

    print('======= Loading VGG_loss ========')
    # Perceptual (VGG-feature) loss used for the generator.
    loss = VGG_LOSS(image_shape)
    print('====== VGG_LOSS =======', loss)

    # Low-res input shape for the generator.
    shape = (image_shape[0] // downscale_factor,
             image_shape[1] // downscale_factor, image_shape[2])
    print('====== Shape =======', shape)

    # Generator description
    generator = Generator(shape).generator()

    # Discriminator description
    # NOTE(review): the discriminator is built for full `image_shape` images
    # while training below feeds 32x32 patches — confirm the shapes agree.
    discriminator = Discriminator(image_shape).discriminator()

    optimizer = Utils_model.get_optimizer()

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = get_gan_network(discriminator, shape, generator, optimizer,
                          loss.vgg_loss)

    # Truncate the loss log at the start of a run.
    loss_file = open(model_save_dir + 'losses.txt', 'w+')
    loss_file.close()

    # Replace the whole-image training set with 32x32 patches.
    # BUG FIX: the original read `this_hr_patch, = extract_2d(...)` — the
    # stray comma tuple-unpacked the return value (only valid for a length-1
    # iterable, and inconsistent with the LR line right below it).
    x_train_hr = extract_2d(x_train_hr, (32, 32))
    x_train_lr = extract_2d(x_train_lr, (32, 32))

    # BUG FIX: batch_count was previously computed from the pre-patch image
    # count; compute it from the patch set the loop actually trains on.
    batch_count = int(x_train_hr.shape[0] / batch_size)
    print('====== Batch_count =======', batch_count)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batch_count)):
            # --- discriminator step on a random patch batch ---
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data_Y = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr,
                                                       real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr,
                                                       fake_data_Y)
            discriminator_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            # --- generator (GAN) step on a fresh batch, discriminator frozen ---
            rand_nums = np.random.randint(0,
                                          x_train_hr.shape[0],
                                          size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, gan_Y])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss :", gan_loss)
        gan_loss = str(gan_loss)

        # Append this epoch's (last-batch) losses to the log.
        loss_file = open(model_save_dir + 'losses.txt', 'a')
        loss_file.write('epoch%d : gan_loss = %s ; discriminator_loss = %f\n' %
                        (e, gan_loss, discriminator_loss))
        loss_file.close()

        if e == 1 or e % 5 == 0:
            Utils.plot_generated_images(output_dir, e, generator, x_test_hr,
                                        x_test_lr)
        if e % 200 == 0:
            generator.save(model_save_dir + 'gen_model%d.h5' % e)
            discriminator.save(model_save_dir + 'dis_model%d.h5' % e)
Exemple #7
0
    # Training-loop fragment (enclosing `def` is outside this view).
    # Expects in scope: epochs, batch_count, batch_size, x_train_hr,
    # x_train_lr, generator, discriminator, gan, losses_to_file, np.
    for e in range(1, epochs+1):
        print ('-'*15, 'Epoch %d' % e, '-'*15)
        for _ in range(batch_count):

            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)

            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]
            generated_images_sr = generator.predict(image_batch_lr)

            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            fake_data_Y = np.random.random_sample(batch_size)*0.2

            discriminator.trainable = True

            d_loss_real = discriminator.train_on_batch(image_batch_hr, real_data_Y)
            d_loss_fake = discriminator.train_on_batch(generated_images_sr, fake_data_Y)
            #d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)

            # --- generator (GAN) step on a fresh batch, discriminator frozen ---
            rand_nums = np.random.randint(0, x_train_hr.shape[0], size=batch_size)
            image_batch_hr = x_train_hr[rand_nums]
            image_batch_lr = x_train_lr[rand_nums]

            gan_Y = np.ones(batch_size) - np.random.random_sample(batch_size)*0.2
            discriminator.trainable = False
            loss_gan = gan.train_on_batch(image_batch_lr, [image_batch_hr,gan_Y])

        print("Loss HR , Loss LR, Loss GAN")
        print(d_loss_real, d_loss_fake, loss_gan)
        # BUG FIX: the two lines below were indented with tabs while the rest
        # of the block uses spaces — a TabError in Python 3. Re-indented with
        # spaces; logic unchanged.
        temp_loss = [d_loss_real, d_loss_fake, loss_gan]
        losses_to_file.append(temp_loss)
Exemple #8
0
def train(epochs, batch_size, input_dir, output_dir, model_save_dir,
          number_of_images, train_test_ratio, resume_train, downscale_factor,
          arch):
    """Train an SRGAN, optionally resuming from the last saved checkpoint.

    Args:
        epochs: number of additional epochs to run.
        batch_size: mini-batch size.
        input_dir: directory the training data is loaded from.
        output_dir: unused here; kept for interface compatibility.
        model_save_dir: directory for loss logs and model checkpoints.
        number_of_images: how many images to load.
        train_test_ratio: unused here; kept for interface compatibility.
        resume_train: if truthy, reload generator/discriminator weights from
            the epoch recorded in ``last_model_epoch.txt``.
        downscale_factor: LR-to-HR scale factor.
        arch: architecture tag used in checkpoint/log file names.

    Relies on module-level globals: ``IMAGE_SHAPE``, ``EPOCHS_CHECKPOINT``,
    ``Utils``, ``Utils_model``, ``VGG_LOSS``, ``Generator``, ``Discriminator``,
    ``gan_network``, ``set_session``, ``tf``, ``tqdm``.
    """
    # TF1-style session with on-demand GPU memory growth.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)

    hr_images, hr_label, lr_images, lr_label = Utils.load_training_data(
        input_dir, number_of_images)

    print(hr_images)
    # Perceptual (VGG-feature) loss used for the generator.
    loss = VGG_LOSS(IMAGE_SHAPE)
    lr_shape = (IMAGE_SHAPE[0] // downscale_factor,
                IMAGE_SHAPE[1] // downscale_factor, IMAGE_SHAPE[2])
    print(lr_shape)
    generator = Generator(lr_shape, downscale_factor, arch).generator()
    discriminator = Discriminator(IMAGE_SHAPE).discriminator()

    optimizer = Utils_model.get_optimizer()

    if resume_train:  # idiom fix: was `if (resume_train == True):`
        last_epoch_number = Utils.get_last_epoch_number(model_save_dir +
                                                        'last_model_epoch.txt')

        gen_model = model_save_dir + arch + "_gen_model" + str(
            last_epoch_number) + ".h5"
        dis_model = model_save_dir + arch + "_dis_model" + str(
            last_epoch_number) + ".h5"
        generator.load_weights(gen_model)
        discriminator.load_weights(dis_model)

    else:
        last_epoch_number = 1

    generator.compile(loss=loss.vgg_loss, optimizer=optimizer)
    discriminator.compile(loss="binary_crossentropy", optimizer=optimizer)

    gan = gan_network(discriminator, lr_shape, generator, optimizer,
                      loss.vgg_loss)

    for e in range(last_epoch_number, last_epoch_number + epochs):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        # NOTE(review): only one batch is trained per "epoch" — confirm
        # tqdm(range(1)) is intentional and not a leftover from debugging.
        for _ in tqdm(range(1)):

            # --- discriminator step on a random batch ---
            rand_nums = np.random.randint(0,
                                          hr_images.shape[0],
                                          size=batch_size)
            image_batch_hr = hr_images[rand_nums]
            image_batch_lr = lr_images[rand_nums]
            # video_images = lr_images[0]
            generated_images = generator.predict(
                image_batch_lr)  #array of generated images

            # Smoothed labels: real in (0.8, 1.0], fake in [0, 0.2).
            real_data = np.ones(
                batch_size) - np.random.random_sample(batch_size) * 0.2
            fake_data = np.random.random_sample(batch_size) * 0.2

            discriminator.trainable = True

            discriminator_loss_real = discriminator.train_on_batch(
                image_batch_hr, real_data)
            discriminator_loss_fake = discriminator.train_on_batch(
                generated_images, fake_data)
            discriminator_loss = 0.5 * np.add(
                discriminator_loss_fake,
                discriminator_loss_real)  #Mean Of Discriminator Loss

            # --- generator (GAN) step on a fresh batch ---
            # BUG FIX: the original drew these indices but never used them,
            # training the GAN on the stale batch; re-sample the image batches
            # as every other variant of this trainer does.
            rand_nums = np.random.randint(0,
                                          hr_images.shape[0],
                                          size=batch_size)
            image_batch_hr = hr_images[rand_nums]
            image_batch_lr = lr_images[rand_nums]

            discriminator.trainable = False
            gan_loss = gan.train_on_batch(image_batch_lr,
                                          [image_batch_hr, real_data])

        print("discriminator_loss : %f" % discriminator_loss)
        print("gan_loss : ", gan_loss)
        gan_loss = str(gan_loss)
        # generated_video_image = generator.predict(np.expand_dims(video_images,axis=0))
        Utils.save_losses_file(model_save_dir, e, gan_loss, discriminator_loss,
                               arch + '_losses.txt')
        # image_array.append(cv2.cvtColor(denormalize(generated_video_image[0]),cv2.COLOR_BGR2RGB))

        if e % EPOCHS_CHECKPOINT == 0:
            Utils.save_losses_file(model_save_dir, e, gan_loss,
                                   discriminator_loss, 'last_model_epoch.txt')
            generator.save(model_save_dir + arch + '_gen_model%d.h5' % e)
            discriminator.save(model_save_dir + arch + '_dis_model%d.h5' % e)