# Shared imports assumed by all the examples below. The helpers referenced
# throughout (load_images, deprocess_image, generator_model,
# discriminator_model, wasserstein_loss, perceptual_loss, save_all_weights,
# etc.) come from the surrounding deblur-gan project and are not shown here.
import math
import os

import numpy as np
import tensorflow as tf
import tqdm
from PIL import Image
from keras.callbacks import TensorBoard
from keras.optimizers import Adam


def test(batch_size):
    data = load_images('./images/test', batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    psnr_sum = 0

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        # Cast to float first: differencing uint8 arrays would wrap around.
        mse = np.mean((y.astype(np.float64) - img.astype(np.float64)) ** 2)
        psnr = 10 * math.log10((255 ** 2) / mse)
        psnr_sum += psnr
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))

    mean_psnr = psnr_sum / generated_images.shape[0]
    print('mean PSNR:', mean_psnr)
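# For reference, the per-image PSNR computed in the loop above is equivalent
# to this standalone helper -- a minimal sketch assuming images in the 0-255
# range (the function name and signature are illustrative, not from the repo):
def psnr(reference, estimate, max_val=255.0):
    # Peak signal-to-noise ratio between two same-shaped arrays.
    mse = np.mean((reference.astype(np.float64) -
                   estimate.astype(np.float64)) ** 2)
    return 10 * np.log10(max_val ** 2 / mse)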
Example #2
def compare(batch_size, input_dir, output_dir):
    data = load_images(input_dir, batch_size)
    y_test, x_test = data['B'], data['A']
    weights = [
        'generator.h5', 'weights/DIV2K_1/generator_3_374.h5',
        'weights/DIV2K_2/generator_3_507.h5'
    ]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    generated = []
    for weight in weights:
        g = generator_model()
        g.load_weights(weight)
        generated_images = g.predict(x=x_test, batch_size=batch_size)
        generated.append([deprocess_image(img) for img in generated_images])
    generated = np.array(generated)
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated.shape[1]):  # number of test images
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img_0 = generated[0, i, :, :, :]  # original weights
        img_1 = generated[1, i, :, :, :]  # transfer learning
        img_2 = generated[2, i, :, :, :]  # transfer learning with locked parameters

        # combine imgs and store
        output = np.concatenate((y, x, img_0, img_1, img_2), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save(os.path.join(output_dir, 'results{}.png'.format(i)))
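# A hypothetical invocation of compare; the paths and batch size below are
# illustrative, not taken from the source:
#
#     compare(batch_size=4, input_dir='./images/test',
#             output_dir='./comparisons')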
Example #3
def train_multiple_outputs(n_images, batch_size, log_dir, epoch_num, critic_updates=5):
    data = load_images('/home/a/public/zcz/deblur-gan/train/', n_images)
    y_train, x_train = data['B'], data['A']

    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), -np.ones((batch_size, 1))

    tensorboard_callback = TensorBoard(log_dir)

    for epoch in tqdm.tqdm(range(epoch_num)):
        permuted_indexes = np.random.permutation(x_train.shape[0])

        d_losses = []
        d_on_g_losses = []
        for index in range(int(x_train.shape[0] / batch_size)):
            batch_indexes = permuted_indexes[index*batch_size:(index+1)*batch_size]
            image_blur_batch = x_train[batch_indexes]
            image_full_batch = y_train[batch_indexes]

            generated_images = g.predict(x=image_blur_batch, batch_size=batch_size)

            for _ in range(critic_updates):
                d_loss_real = d.train_on_batch(image_full_batch, output_true_batch)
                d_loss_fake = d.train_on_batch(generated_images, output_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)

            d.trainable = False

            d_on_g_loss = d_on_g.train_on_batch(image_blur_batch, [image_full_batch, output_true_batch])
            d_on_g_losses.append(d_on_g_loss)

            d.trainable = True

        # write_log(tensorboard_callback, ['g_loss', 'd_on_g_loss'], [np.mean(d_losses), np.mean(d_on_g_losses)], epoch_num)
        print(np.mean(d_losses), np.mean(d_on_g_losses))
        with open('log.txt', 'a+') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses), np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
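# The two losses used above are defined elsewhere in the repository. A
# minimal sketch of typical definitions, assuming the Keras backend and a
# VGG16 feature extractor (the layer choice and input shape are assumptions):
import keras.backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model

def wasserstein_loss(y_true, y_pred):
    # With +1/-1 labels this is the (signed) mean critic score.
    return K.mean(y_true * y_pred)

def perceptual_loss(y_true, y_pred):
    # L2 distance in VGG16 feature space (block3_conv3 here, assumed).
    vgg = VGG16(include_top=False, weights='imagenet',
                input_shape=(256, 256, 3))
    loss_model = Model(inputs=vgg.input,
                       outputs=vgg.get_layer('block3_conv3').output)
    loss_model.trainable = False
    return K.mean(K.square(loss_model(y_true) - loss_model(y_pred)))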
Example #4
def test(batch_size):
    data = load_images('./images/test', batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
Example #5
def test(batch_size):
    n_images = 300
    data = load_images('../images/test', n_images)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights('../generator4-40.h5')

    psnr = 0
    ssim = 0
    num_batches = int(n_images / batch_size)
    for index in tqdm.tqdm(range(num_batches)):
        batch_test = x_test[index * batch_size:(index + 1) * batch_size]
        batch_label = y_test[index * batch_size:(index + 1) * batch_size]

        generated_images = g.predict(x=batch_test, batch_size=batch_size)
        generated = np.array(
            [deprocess_image(img) for img in generated_images])
        batch_label = deprocess_image(batch_label)

        # Batch-mean SSIM and PSNR against the sharp ground truth.
        with tf.Session() as sess:
            yy = tf.convert_to_tensor(batch_label, dtype=tf.float32)
            imgimg = tf.convert_to_tensor(generated, dtype=tf.float32)
            ssim += np.mean(sess.run(tf.image.ssim(yy, imgimg, max_val=255)))
            psnr += np.mean(sess.run(tf.image.psnr(yy, imgimg, max_val=255)))

    print('mean PSNR:', psnr / num_batches)
    print('mean SSIM:', ssim / num_batches)
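# Under TensorFlow 2 with eager execution the Session boilerplate above is
# unnecessary; a minimal sketch of the same batch metrics (the function name
# is illustrative):
def batch_ssim_psnr(labels, generated, max_val=255.0):
    # Mean SSIM and PSNR over a batch of HWC images.
    labels = tf.convert_to_tensor(labels, dtype=tf.float32)
    generated = tf.convert_to_tensor(generated, dtype=tf.float32)
    ssim = tf.reduce_mean(tf.image.ssim(labels, generated, max_val=max_val))
    psnr = tf.reduce_mean(tf.image.psnr(labels, generated, max_val=max_val))
    return float(ssim), float(psnr)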
Example #6
def train_multiple_outputs(n_images,
                           batch_size,
                           log_dir,
                           epoch_num,
                           critic_updates=5):
    data = load_images('../images/train', n_images)
    y_train, x_train = data['B'], data['A']

    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    tensorboard_callback = TensorBoard(log_dir)

    # Mine an extra 50% of each batch as hard examples; the +1/-1 label
    # vectors are sized for the enlarged (batch_size + hn_num) batches.
    hn_num = int(batch_size * 0.5)

    hard_true_batch = np.ones((batch_size + hn_num, 1))
    hard_false_batch = -np.ones((batch_size + hn_num, 1))

    for epoch in tqdm.tqdm(range(epoch_num)):
        permuted_indexes = np.random.permutation(x_train.shape[0])

        d_losses = []
        d_on_g_losses = []
        for index in range(int(x_train.shape[0] / batch_size)):
            batch_indexes = permuted_indexes[index * batch_size:
                                             (index + 1) * batch_size]
            image_blur_batch = x_train[batch_indexes]
            image_full_batch = y_train[batch_indexes]

            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            for _ in range(critic_updates):
                d.trainable = False
                # Score each generated image with the critic; the hn_num
                # highest-scoring fakes are kept as extra "hard" negatives.
                temp_hn = []
                for i in range(generated_images.shape[0]):
                    t_s = d.predict(generated_images[i][np.newaxis, ...])[0][0]
                    temp_hn.append(t_s)
                hn_ind = np.argsort(temp_hn)[::-1][:hn_num]

                hard_neg = generated_images[hn_ind]
                hard_neg_y = image_full_batch[hn_ind]

                d.trainable = True
                neg_train = np.concatenate((generated_images, hard_neg),
                                           axis=0)
                pos_train = np.concatenate((image_full_batch, hard_neg_y),
                                           axis=0)
                d_loss_real = d.train_on_batch(pos_train, hard_true_batch)
                d_loss_fake = d.train_on_batch(neg_train, hard_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)

            # Mine hard examples for the generator: the hn_num fakes the
            # critic scores lowest have their blur/sharp pairs duplicated.
            d.trainable = False
            temp_g = []
            for i in range(generated_images.shape[0]):
                t_s = d.predict(generated_images[i][np.newaxis, ...])[0][0]
                temp_g.append(t_s)
            hn_ind = np.argsort(temp_g)[:hn_num]

            hard_g_x = image_blur_batch[hn_ind]
            hard_g_y = image_full_batch[hn_ind]
            g_blur = np.concatenate((image_blur_batch, hard_g_x), axis=0)
            g_full = np.concatenate((image_full_batch, hard_g_y), axis=0)
            d_on_g_loss = d_on_g.train_on_batch(g_blur,
                                                [g_full, hard_true_batch])
            d_on_g_losses.append(d_on_g_loss)
            d.trainable = True

        # write_log(tensorboard_callback, ['g_loss', 'd_on_g_loss'], [np.mean(d_losses), np.mean(d_on_g_losses)], epoch_num)
        print(np.mean(d_losses), np.mean(d_on_g_losses))
        with open('log.txt', 'a+') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses),
                                            np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
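# The mining steps above reduce to a top-k / bottom-k index selection over
# the critic scores. In isolation, with made-up scores:
#
#     scores = np.array([0.3, -1.2, 2.5, 0.0, 1.1])
#     np.argsort(scores)[::-1][:2]   # two highest -> array([2, 4])
#     np.argsort(scores)[:2]         # two lowest  -> array([1, 3])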
Example #7
def train_multiple_outputs(n_images,
                           batch_size,
                           input_dir,
                           log_dir,
                           weights_dir,
                           generator_weights,
                           discriminator_weights,
                           use_transfer,
                           epoch_num,
                           critic_updates=5):
    data = load_images(input_dir, n_images)
    y_train, x_train = data['B'], data['A']

    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    if generator_weights is not None:
        g.load_weights(generator_weights)
    if discriminator_weights is not None:
        d.load_weights(discriminator_weights)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    def freeze_except(m, i, j):
        # Freeze every layer of m except the slice m.layers[i:j].
        for layer in m.layers:
            layer.trainable = False
        for layer in m.layers[i:j]:
            layer.trainable = True

    d.trainable = True
    if use_transfer:
        freeze_except(g, -4, -2)
    d.compile(optimizer=d_opt, loss=wasserstein_loss)

    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch = np.ones((batch_size, 1))
    output_false_batch = -np.ones((batch_size, 1))

    tensorboard_callback = TensorBoard(log_dir)

    for epoch in tqdm.tqdm(range(epoch_num)):
        permuted_indexes = np.random.permutation(x_train.shape[0])

        d_losses = []
        d_on_g_losses = []
        for index in range(int(x_train.shape[0] / batch_size)):
            batch_indexes = permuted_indexes[index * batch_size:
                                             (index + 1) * batch_size]
            image_blur_batch = x_train[batch_indexes]
            image_full_batch = y_train[batch_indexes]

            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            for _ in range(critic_updates):
                d_loss_real = d.train_on_batch(image_full_batch,
                                               output_true_batch)
                d_loss_fake = d.train_on_batch(generated_images,
                                               output_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)

            d.trainable = False

            d_on_g_loss = d_on_g.train_on_batch(
                image_blur_batch, [image_full_batch, output_true_batch])
            d_on_g_losses.append(d_on_g_loss)

            d.trainable = True

        # write_log(tensorboard_callback, ['g_loss', 'd_on_g_loss'], [np.mean(d_losses), np.mean(d_on_g_losses)], epoch_num)
        print(np.mean(d_losses), np.mean(d_on_g_losses))

        with open(os.path.join(log_dir, 'train_loss.txt'), 'a+') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses),
                                            np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)),
                         weights_dir)
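# Note: in Keras the trainable flags are captured when a model is compiled,
# which is why freeze_except runs before the compile calls above. A
# hypothetical check of what freeze_except(g, -4, -2) leaves trainable:
#
#     g = generator_model()
#     freeze_except(g, -4, -2)
#     print([l.name for l in g.layers if l.trainable])  # only g.layers[-4:-2]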