Code Example #1
File: main.py Project: hnlatha/ImageDeblurring
def test(batch_size):
    # Note: load_data returns the sharp (full) images first and the blurred images second
    y_test, x_test = data_utils.load_data(data_type='test')
    g = generator_model()
    g.load_weights('weight/generator_weights.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    data_utils.generate_image(y_test, x_test, generated_images, 'result/finally/')
Code Example #2
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    # kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)  # sharpening kernel
    print(generated.shape)
    #ima = Image.fromarray(generated)
    #dst = cv.filter2D(ima, -1, kernel=kernel)
    #dst.save("/content/drive/My Drive/5405_digitalMedia/result/e.png")
    #image_arr = np.array(dst)

    x_test = deprocess_image(x_test)
    '''
    img = generated[0, :, :, :]
    im = Image.fromarray(img.astype(np.uint8))
    im.save("/content/drive/My Drive/5405_digitalMedia/result/f.png")
    src = cv.imread("/content/drive/My Drive/5405_digitalMedia/result/f.png")
    kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], dtype=np.float32)
    sharpen_image = cv.filter2D(src, cv.CV_32F, kernel=kernel)
    sharpen_image = cv.convertScaleAbs(sharpen_image)
    cv.imwrite("/content/drive/My Drive/5405_digitalMedia/result/g.png",sharpen_image)
    '''

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save("result.jpg")  #('deblur'+image_path)
Code Example #3
    def __init__(self, args):

        self.img_size = args.imgsize
        self.channels = args.channels
        self.z_dim = args.zdims
        self.epochs = args.epoch
        self.batch_size = args.batchsize

        self.d_opt = Adam(lr=1e-5, beta_1=0.1)
        self.g_opt = Adam(lr=2e-4, beta_1=0.5)

        if not os.path.exists('./result/'):
            os.makedirs('./result/')
        if not os.path.exists('./model_images/'):
            os.makedirs('./model_images/')

        """ build discriminator model """
        self.d = model.discriminator_model(self.img_size, self.channels)
        plot_model(self.d, to_file='./model_images/discriminator.png', show_shapes=True)

        """ build generator model """
        self.g = model.generator_model(self.z_dim, self.img_size, self.channels)
        plot_model(self.g, to_file='./model_images/generator.png', show_shapes=True)

        """ discriminator on generator model """
        self.d_on_g = model.generator_containg_discriminator(self.g, self.d, self.z_dim)
        plot_model(self.d_on_g, to_file='./model_images/d_on_g.png', show_shapes=True)

        self.g.compile(loss='mse', optimizer=self.g_opt)
        self.d_on_g.compile(loss='mse', optimizer=self.g_opt)
        self.d.trainable = True
        self.d.compile(loss='mse', optimizer=self.d_opt)
Code Example #4
def test_generator_model():
    epochs = 1
    input_data = np.random.rand(1, 100)
    input_shape = input_data.shape
    generator = model.generator_model()
    adam = Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss='binary_crossentropy', optimizer=adam)
    pred = generator.predict(input_data)
    return pred.shape
Code Example #5
File: model_tests.py Project: jhayes14/GAN
def test_generator_model():
    epochs = 1
    input_data = np.random.rand(1, 100)
    input_shape = input_data.shape
    generator = model.generator_model()
    adam=Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss='binary_crossentropy', optimizer=adam)
    pred = generator.predict(input_data)
    return pred.shape
Code Example #6
File: train.py Project: vdsprakash/deblur-gan
def train_multiple_outputs(n_images, batch_size, epoch_num, critic_updates=5):
    data = load_images('./images/train', n_images)
    y_train, x_train = data['B'], data['A']

    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), -np.ones((batch_size, 1))

    for epoch in range(epoch_num):
        print('epoch: {}/{}'.format(epoch, epoch_num))
        print('batches: {}'.format(x_train.shape[0] / batch_size))

        permutated_indexes = np.random.permutation(x_train.shape[0])

        d_losses = []
        d_on_g_losses = []
        for index in range(int(x_train.shape[0] / batch_size)):
            batch_indexes = permutated_indexes[index*batch_size:(index+1)*batch_size]
            image_blur_batch = x_train[batch_indexes]
            image_full_batch = y_train[batch_indexes]

            generated_images = g.predict(x=image_blur_batch, batch_size=batch_size)

            for _ in range(critic_updates):
                d_loss_real = d.train_on_batch(image_full_batch, output_true_batch)
                d_loss_fake = d.train_on_batch(generated_images, output_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)
            print('batch {} d_loss : {}'.format(index+1, np.mean(d_losses)))

            d.trainable = False

            d_on_g_loss = d_on_g.train_on_batch(image_blur_batch, [image_full_batch, output_true_batch])
            d_on_g_losses.append(d_on_g_loss)
            print('batch {} d_on_g_loss : {}'.format(index+1, d_on_g_loss))

            d.trainable = True

        with open('log.txt', 'a') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses), np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
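
The WGAN-style training loop above compiles the combined model with perceptual_loss and wasserstein_loss but does not show their definitions. A minimal sketch of what these losses typically look like in DeblurGAN-style code, assuming a Keras backend and a VGG16 content loss (the layer name 'block3_conv3' and the image_shape value below are assumptions):

import keras.backend as K
from keras.applications.vgg16 import VGG16
from keras.models import Model

image_shape = (256, 256, 3)  # assumed generator input/output shape

def wasserstein_loss(y_true, y_pred):
    # with +1/-1 labels this is the usual critic score used as a Wasserstein estimate
    return K.mean(y_true * y_pred)

def perceptual_loss(y_true, y_pred):
    # compare VGG16 feature maps of the sharp and generated images
    vgg = VGG16(include_top=False, weights='imagenet', input_shape=image_shape)
    loss_model = Model(inputs=vgg.input, outputs=vgg.get_layer('block3_conv3').output)
    loss_model.trainable = False
    return K.mean(K.square(loss_model(y_true) - loss_model(y_pred)))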
Code Example #7
def test(batch_size):
    # Note: load_data returns the sharp (full) images first and the blurred images second
    y_test, x_test = data_utils.load_data(data_type='test')
    g = generator_model()
    g.load_weights(
        'C:/Users/ayush/Downloads/VideoDeblurring-MinorCOPECS/weight/generator_weights.h5'
    )
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    data_utils.generate_image(
        y_test, x_test, generated_images,
        'C:/Users/ayush/Downloads/VideoDeblurring-MinorCOPECS/result/')
Code Example #8
def inpaint():
    g = generator_model()
    g.load_weights(
        '/home/alyssa/PythonProjects/occluded/key_code/img_inpainting/weights/D17/generator_80000_47.h5'
    )

    sum_ssim = 0
    sum_ac = 0
    count = 0
    for i in range(9440):
        y_pre, x_pre = load_data(i * 4, (i + 1) * 4)

        generated_images = g.predict(x=x_pre, batch_size=2)
        result = (generated_images + 1) * 127.5
        re = (x_pre + 1) * 127.5
        ori = (y_pre + 1) * 127.5

        for j in range(4):
            count += 1
            cv2.imwrite(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_0.jpg', ori[j])
            cv2.imwrite(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_1.jpg', result[j])
            cv2.imwrite(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_2.jpg', re[j])

            a = cv2.imread(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_0.jpg')
            b = cv2.imread(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_1.jpg')
            c = cv2.imread(
                '/home/alyssa/PythonProjects/occluded/test_image/D17/' +
                str(count) + '_2.jpg')

            # ab = skimage.measure.compare_psnr(a, b)
            # ac = skimage.measure.compare_psnr(a, c)

            ab_ssim = skimage.measure.compare_ssim(a, b, multichannel=True)
            ac_ssim = skimage.measure.compare_ssim(a, c, multichannel=True)

            print(count, " ", ab_ssim)

            sum_ssim = sum_ssim + ab_ssim
            sum_ac = sum_ac + ac_ssim

    percent = sum_ssim / count
    percent_ac = sum_ac / count
    print("result:", percent, " ", percent_ac)
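
Note that skimage.measure.compare_ssim has been deprecated and removed in recent scikit-image releases; from scikit-image 0.16 onward the equivalent function is skimage.metrics.structural_similarity. A sketch of the replacement for the two SSIM calls above, with otherwise default settings:

from skimage.metrics import structural_similarity

# multichannel=True was later renamed to channel_axis=-1 in newer scikit-image versions
ab_ssim = structural_similarity(a, b, multichannel=True)
ac_ssim = structural_similarity(a, c, multichannel=True)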
Code Example #9
File: model_tests.py Project: jhayes14/GAN
def test_check_gen_model():
    '''
        Check generator creates correct image size
    '''
    generator = model.generator_model()
    adam_gen=Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss='binary_crossentropy', optimizer=adam_gen)
    #fake = train.noise_image()
    fake = np.array( [ train.noise_image() for n in range(1) ] )
    fake_predict = generator.predict(fake)

    rolled = np.rollaxis(fake_predict[0], 0, 3)
    print(rolled)
Code Example #10
def test_check_gen_model():
    '''
        Check generator creates correct image size
    '''
    generator = model.generator_model()
    adam_gen = Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss='binary_crossentropy', optimizer=adam_gen)
    #fake = train.noise_image()
    fake = np.array([train.noise_image() for n in range(1)])
    fake_predict = generator.predict(fake)

    rolled = np.rollaxis(fake_predict[0], 0, 3)
    print(rolled)
Code Example #11
File: train.py Project: LiuFang816/SALSTM_py_data
def generate(img_num):
    '''
        Generate new images based on trained model.
    '''
    generator = model.generator_model()
    adam=Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    generator.compile(loss='binary_crossentropy', optimizer=adam)
    generator.load_weights('generator_weights')

    noise = np.array( [ noise_image() for n in range(img_num) ] )

    print('Generating images..')
    generated_images = [np.rollaxis(img, 0, 3) for img in generator.predict(noise)]
    for index, img in enumerate(generated_images):
        cv2.imwrite("{}.jpg".format(index), np.uint8(255 * 0.5 * (img + 1.0)))
Code Example #12
def deblur_real():
    g = generator_model()
    g.load_weights(
        '/Users/albert/con_lab/alyssa/OCCLUDED/generator_57000_243.h5')

    count = 0
    for i in range(10):
        x_pre = load_data(i * 2, (i + 1) * 2)
        generated_images = g.predict(x=x_pre, batch_size=1)
        result = (generated_images + 1) * 127.5

        for j in range(2):
            count += 1
            cv2.imwrite(
                '/home/alyssa/PythonProjects/occluded/11/' + str(count) +
                '_1.jpg', result[j])
Code Example #13
File: test.py Project: ivoPe/deblur-gan
def test(batch_size):
    data = load_images(TEST_FOLDER, batch_size)
    y_test, x_test = data['B'], data['A']
    g = generator_model()
    g.load_weights(SAVE_MODEL_PATH)
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
Code Example #14
File: deblur_image.py Project: shivam2296/deblur-gan
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    #    g.load_weights('weights/719/generator_2_640.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        im = Image.fromarray(img.astype(np.uint8))
        im.save('deblur' + image_path)
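
A sketch of a small command-line wrapper around this function; the --image_path argument name here is only an illustration, not the project's actual CLI:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Deblur a single image with the trained generator')
    parser.add_argument('--image_path', required=True, help='path to the blurred input image')
    args = parser.parse_args()
    deblur(args.image_path)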
Code Example #15
File: main.py Project: hnlatha/ImageDeblurring
def test_pictures(batch_size):
    data_path = 'data/test/*.jpeg'
    images_path = gb.glob(data_path)
    data_blur = []
    for image_path in images_path:
        image_blur = Image.open(image_path)
        data_blur.append(np.array(image_blur))

    data_blur = np.array(data_blur).astype(np.float32)
    data_blur = data_utils.normalization(data_blur)

    g = generator_model()
    g.load_weights('weight/generator_weights.h5')
    generated_images = g.predict(x=data_blur, batch_size=batch_size)
    generated = generated_images * 127.5 + 127.5
    for i in range(generated.shape[0]):
        image_generated = generated[i, :, :, :]
        Image.fromarray(image_generated.astype(np.uint8)).save('result/test/' + str(i) + '.png')
Code Example #16
File: deblur_image.py Project: hyzcn/CDGAN
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator_49_478.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('deblur' + image_path)
Code Example #17
def deblur(img):
    # sess = tf.Session(config=tf.ConfigProto(
    #       allow_soft_placement=True, log_device_placement=True))

    # load model and model weights
    g = generator_model()
    g.load_weights('./deblur-gan/generator.h5')

    # resize image, then scale pixel values to [-1, 1]
    img = cv2.resize(img, (256, 256))[np.newaxis, ...]
    img = (img - 127.5) / 127.5
    x_test = img

    # make prediction and format output from model
    generated_images = g.predict(x=x_test)
    generated = np.array([(img * 127.5 + 127.5).astype('uint8')
                          for img in generated_images])[0, :, :, :]
    im = Image.fromarray(generated.astype(np.uint8), 'RGB')
    return im
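
A minimal usage sketch for the function above, assuming an image readable by OpenCV (the file names are placeholders):

import cv2

img = cv2.imread('input.jpg')          # H x W x 3 uint8 array
deblurred = deblur(img)                # returns a 256x256 PIL Image
deblurred.save('input_deblurred.jpg')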
Code Example #18
File: utils.py Project: Fathaah/MonocularDepth
def depth(image_path):
    data = {
        'A_paths': [path + image_path],
        'A': np.array([preprocess_image(load_image(path + image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('generator.h5')
    generated_images = g.predict(x=rgb2gray(x_test))
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        #img=rgb2gray(img)
        output = img
        im = Image.fromarray(output.astype(np.uint8))
        im.save('./images/out/' + image_path)
Code Example #19
File: deblur_image.py Project: ivoPe/deblur-gan
def deblur(image_path):
    data = {
        'A_paths': [image_path],
        'A': np.array([preprocess_image(load_image(image_path))])
    }
    x_test = data['A']
    g = generator_model()
    g.load_weights('/notebooks/deblur-gan/weights/89/generator_3_659.h5')
    generated_images = g.predict(x=x_test)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)

    for i in range(generated_images.shape[0]):
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        output = np.concatenate((x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        output_save_path = image_path.split('.')[0] + '_deblur.jpg'
        im.save(output_save_path)
Code Example #20
File: test.py Project: sukhad-app/text_d
def test(batch_size):
    #data = load_images('./images/test', batch_size)
    y_train = sorted(glob.glob('/home/turing/td/data/*.png'))
    x_train = sorted(glob.glob('/home/turing/td/blur/*.png'))
    y_test, x_test = load_image(y_train[:5]), load_image(x_train[:5])
    g = generator_model()
    g.load_weights('weights1/428/generator_13_261.h5')
    generated_images = g.predict(x=x_test, batch_size=batch_size)
    generated = np.array([deprocess_image(img) for img in generated_images])
    x_test = deprocess_image(x_test)
    y_test = deprocess_image(y_test)

    for i in range(generated_images.shape[0]):
        y = y_test[i, :, :, :]
        x = x_test[i, :, :, :]
        img = generated[i, :, :, :]
        #print img.shape
        #img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        #y = cv2.cvtColor(y,cv2.COLOR_BGR2GRAY)
        #x = cv2.cvtColor(x,cv2.COLOR_BGR2GRAY)
        output = np.concatenate((y, x, img), axis=1)
        im = Image.fromarray(output.astype(np.uint8))
        im.save('results{}.png'.format(i))
Code Example #21
File: train.py Project: MarkRatFelt/generate-images
def train(path, batch_size, EPOCHS):
    # reproducibility
    # np.random.seed(42)

    # fig = plt.figure()

    # Get image paths
    print("Loading paths..")
    paths = glob.glob(os.path.join(path, "*.jpg"))
    print("Got paths..")
    print(paths)

    # Load images
    IMAGES = np.array([load_image(p) for p in paths])
    np.random.shuffle(IMAGES)

    print(IMAGES[0])

    # IMAGES, labels = load_mnist(dataset="training", digits=np.arange(10), path=path)
    # IMAGES = np.array( [ np.array( [ scipy.misc.imresize(p, (64, 64)) / 256 ] * 3 ) for p in IMAGES ] )

    # np.random.shuffle( IMAGES )

    BATCHES = [b for b in chunks(IMAGES, batch_size)]

    discriminator = model.discriminator_model()
    generator = model.generator_model()
    discriminator_on_generator = model.generator_containing_discriminator(
        generator, discriminator)
    # adam_gen=Adam(lr=0.0002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    adam_gen = Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    adam_dis = Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    # opt = RMSprop()
    generator.compile(loss='binary_crossentropy', optimizer=adam_gen)
    discriminator_on_generator.compile(loss='binary_crossentropy',
                                       optimizer=adam_gen)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_dis)

    print("Number of batches", len(BATCHES))
    print("Batch size is", batch_size)

    # margin = 0.25
    # equilibrium = 0.6931
    inter_model_margin = 0.10

    for epoch in range(EPOCHS):
        print()
        print("Epoch", epoch)
        print()

        # load weights on first try (i.e. if process failed previously and we are attempting to recapture lost data)
        if epoch == 0:
            if os.path.exists('generator_weights') and os.path.exists(
                    'discriminator_weights'):
                print("Loading saved weights..")
                generator.load_weights('generator_weights')
                discriminator.load_weights('discriminator_weights')
                print("Finished loading")
            else:
                pass

        for index, image_batch in enumerate(BATCHES):
            print("Epoch", epoch, "Batch", index)

            Noise_batch = np.array(
                [noise_image() for n in range(len(image_batch))])
            generated_images = generator.predict(Noise_batch)
            # print generated_images[0][-1][-1]

            for i, img in enumerate(generated_images):
                rolled = np.rollaxis(img, 0, 3)
                cv2.imwrite('results/' + str(i) + ".jpg",
                            np.uint8(255 * 0.5 * (rolled + 1.0)))

            Xd = np.concatenate((image_batch, generated_images))
            yd = [1] * len(image_batch) + [0] * len(image_batch)  # labels

            print("Training first discriminator..")
            d_loss = discriminator.train_on_batch(Xd, yd)

            Xg = Noise_batch
            yg = [1] * len(image_batch)

            print("Training first generator..")
            g_loss = discriminator_on_generator.train_on_batch(Xg, yg)

            print("Initial batch losses : ", "Generator loss", g_loss,
                  "Discriminator loss", d_loss, "Total:", g_loss + d_loss)

            # print "equilibrium - margin", equilibrium - margin

            if g_loss < d_loss and abs(d_loss - g_loss) > inter_model_margin:
                # for j in range(handicap):
                while abs(d_loss - g_loss) > inter_model_margin:
                    print("Updating discriminator..")
                    # g_loss = discriminator_on_generator.train_on_batch(Xg, yg)
                    d_loss = discriminator.train_on_batch(Xd, yd)
                    print("Generator loss", g_loss, "Discriminator loss",
                          d_loss)
                    if d_loss < g_loss:
                        break
            elif d_loss < g_loss and abs(d_loss - g_loss) > inter_model_margin:
                # for j in range(handicap):
                while abs(d_loss - g_loss) > inter_model_margin:
                    print("Updating generator..")
                    # d_loss = discriminator.train_on_batch(Xd, yd)
                    g_loss = discriminator_on_generator.train_on_batch(Xg, yg)
                    print("Generator loss", g_loss, "Discriminator loss",
                          d_loss)
                    if g_loss < d_loss:
                        break
            else:
                pass

            print("Final batch losses (after updates) : ", "Generator loss",
                  g_loss, "Discriminator loss", d_loss, "Total:",
                  g_loss + d_loss)
            print()
            if index % 20 == 0:
                print('Saving weights..')
                generator.save_weights('generator_weights', True)
                discriminator.save_weights('discriminator_weights', True)

        plt.clf()
        for i, img in enumerate(generated_images[:5]):
            i = i + 1
            plt.subplot(3, 3, i)
            rolled = np.rollaxis(img, 0, 3)
            # plt.imshow(rolled, cmap='gray')
            plt.imshow(rolled)
            plt.axis('off')
        # fig.canvas.draw()
        plt.savefig('Epoch_' + str(epoch) + '.png')
Code Example #22
def train_multiple_outputs(n_images, batch_size, epoch_num, critic_updates=5):
    g = generator_model()
    d = discriminator_model()
    vgg = build_vgg()
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d, vgg)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    optimizer = Adam(1E-4, 0.5)
    vgg.trainable = False
    vgg.compile(loss='mse', optimizer=optimizer, metrics=['accuracy'])

    d.trainable = True
    d.compile(optimizer=d_opt, loss='binary_crossentropy')
    d.trainable = False
    loss = ['mae', 'mse', 'binary_crossentropy']
    loss_weights = [0.1, 100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), np.zeros(
        (batch_size, 1))

    for epoch in range(epoch_num):
        print('epoch: {}/{}'.format(epoch, epoch_num))

        y_pre, x_pre, mask = load_data(batch_size)

        d_losses = []
        d_on_g_losses = []

        generated_images = g.predict(x=x_pre, batch_size=batch_size)

        for _ in range(critic_updates):
            d_loss_real = d.train_on_batch(y_pre, output_true_batch)
            d_loss_fake = d.train_on_batch(generated_images,
                                           output_false_batch)
            d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
            d_losses.append(d_loss)
        print('batch {} d_loss : {}'.format(epoch, np.mean(d_losses)))

        d.trainable = False

        real_result = mask * y_pre
        y_features = vgg.predict(y_pre)

        d_on_g_loss = d_on_g.train_on_batch(
            [x_pre, mask], [real_result, y_features, output_true_batch])
        d_on_g_losses.append(d_on_g_loss)
        print('batch {} d_on_g_loss : {}'.format(epoch, d_on_g_loss))

        d.trainable = True

        if epoch % 100 == 0:
            generated = np.array([(img + 1) * 127.5
                                  for img in generated_images])
            full = np.array([(img + 1) * 127.5 for img in y_pre])
            blur = np.array([(img + 1) * 127.5 for img in x_pre])

            for i in range(3):
                img_ge = generated[i, :, :, :]
                img_fu = full[i, :, :, :]
                img_bl = blur[i, :, :, :]
                output = np.concatenate((img_ge, img_fu, img_bl), axis=1)
                cv2.imwrite(
                    '/home/alyssa/PythonProjects/occluded/key_code/img_inpainting/out/'
                    + str(epoch) + '_' + str(i) + '.jpg', output)

        if (epoch > 10000 and epoch % 1000 == 0):
            save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
Code Example #23
File: main.py Project: liuglen/imagdeblurring
def train(batch_size, epoch_num):
    # Note: load_data returns the sharp (full) images first and the blurred images second
    y_train, x_train = data_utils.load_data(data_type='train')

    # GAN
    g = generator_model()
    d = discriminator_model()
    d_on_g = generator_containing_discriminator(g, d)

    # compile the models with default optimizer parameters
    # the generator is trained with generator_loss
    g.compile(optimizer='adam', loss=generator_loss)
    # the discriminator is trained with binary cross-entropy
    d.compile(optimizer='adam', loss='binary_crossentropy')
    # the combined adversarial net is trained with adversarial_loss
    d_on_g.compile(optimizer='adam', loss=adversarial_loss)

    for epoch in range(epoch_num):
        print('epoch: ', epoch + 1, '/', epoch_num)
        print('batches: ', int(x_train.shape[0] / batch_size))

        for index in range(int(x_train.shape[0] / batch_size)):
            # select a batch data
            image_blur_batch = x_train[index * batch_size:(index + 1) *
                                       batch_size]
            image_full_batch = y_train[index * batch_size:(index + 1) *
                                       batch_size]
            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            # save generated images every 30 iterations
            if (index % 30 == 0) and (index != 0):
                data_utils.generate_image(image_full_batch, image_blur_batch,
                                          generated_images, 'result/interim/',
                                          epoch, index)

            # concatenate the full and generated images,
            # the full images at top, the generated images at bottom
            x = np.concatenate((image_full_batch, generated_images))

            # generate labels for the full and generated images
            y = [1] * batch_size + [0] * batch_size

            # train discriminator
            d_loss = d.train_on_batch(x, y)
            print('batch %d d_loss : %f' % (index + 1, d_loss))

            # freeze the discriminator so only the adversarial net is updated
            d.trainable = False

            # train adversarial net
            d_on_g_loss = d_on_g.train_on_batch(image_blur_batch,
                                                [1] * batch_size)
            print('batch %d d_on_g_loss : %f' % (index + 1, d_on_g_loss))

            # train generator
            g_loss = g.train_on_batch(image_blur_batch, image_full_batch)
            print('batch %d g_loss : %f' % (index + 1, g_loss))

            # unfreeze the discriminator
            d.trainable = True

            # save generator and discriminator weights every 30 iterations
            if (index % 30 == 0) and (index != 0):
                g.save_weights('weight/generator_weights.h5', True)
                d.save_weights('weight/discriminator_weights.h5', True)
Code Example #24
x_shape = 512
y_shape = 512

def train(gen,disc,cGAN,gray,rgb,gray_val,rgb_val,batch):
    samples = len(rgb)
    gen_image = gen.predict(gray, batch_size=16)   
    gen_image_val = gen.predict(gray_val, batch_size=8)
    inputs = np.concatenate([gray, gray])
    outputs = np.concatenate([rgb, gen_image])
    y = np.concatenate([np.ones((samples, 1)), np.zeros((samples, 1))])
    disc.fit([inputs, outputs], y, epochs=1, batch_size=4)
    disc.trainable = False
    cGAN.fit(gray, [np.ones((samples, 1)), rgb], epochs=1, batch_size=batch,
             validation_data=[gray_val, [np.ones((val_samples, 1)), rgb_val]])
    disc.trainable = True

gen = generator_model(x_shape,y_shape)

disc = discriminator_model(x_shape,y_shape)

cGAN = cGAN_model(gen, disc)
# cGAN.load_weights('sketchColorisation/result/store/9950.h5')

disc.compile(loss=['binary_crossentropy'], optimizer=tf.keras.optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08), metrics=['accuracy'])

cGAN.compile(loss=['binary_crossentropy',custom_loss_2], loss_weights=[5, 100], optimizer=tf.keras.optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08))
tensorboard = tf.keras.callbacks.TensorBoard(log_dir="logs/{}".format(time()))

dataset = 'sketchColorisation/Images/' 
graystore = 'sketchColorisation/grayScale/'
rgbstore = 'sketchColorisation/colored/'
val_data = 'sketchColorisation/validation/'
Code Example #25
from tensorflow.python.client import device_lib

print(device_lib.list_local_devices())

reader = Reader('data/train_Q.post',
                'data/train_Q.response',
                'data/result.txt')

print(len(reader.d))

g_model = generator_model(vocab_size=len(reader.d),
                          embedding_size=128,
                          lstm_size=128,
                          num_layer=4,
                          max_length_encoder=40,
                          max_length_decoder=40,
                          max_gradient_norm=2,
                          batch_size_num=20,
                          learning_rate=0.001,
                          beam_width=5)
d_model = discriminator_model(vocab_size=len(reader.d),
                              embedding_size=128,
                              lstm_size=128,
                              num_layer=4,
                              max_post_length=40,
                              max_resp_length=40,
                              max_gradient_norm=2,
                              batch_size_num=20,
                              learning_rate=0.001)

saver = tf.train.Saver(tf.global_variables(), keep_checkpoint_every_n_hours=1.0)
Code Example #26
File: model.py Project: Fathaah/MonocularDepth
def train_multiple_outputs(n_images, batch_size, epoch_num, critic_updates=5):

    g = generator_model()
    d = discriminator_model()
    g.load_weights('generator.h5')
    d.load_weights('discriminator.h5')
    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), np.zeros(
        (batch_size, 1))

    for epoch in range(epoch_num):
        print('epoch: {}/{}'.format(epoch, epoch_num))
        print('batches: {}'.format(batch_size))
        start = 0

        d_losses = []
        d_on_g_losses = []
        shuffle()
        for index in range(int(25000 // batch_size)):
            data = load_images(start, batch_size)
            y_train, x_train = data['B'], data['A']
            image_blur_batch = x_train
            image_full_batch = y_train
            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            for _ in range(critic_updates):
                d_loss_real = d.train_on_batch(image_full_batch,
                                               output_true_batch)
                d_loss_fake = d.train_on_batch(generated_images,
                                               output_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)
            print('batch {} d_loss : {}'.format(index + 1, np.mean(d_losses)))

            d.trainable = False

            d_on_g_loss = d_on_g.train_on_batch(
                image_blur_batch, [image_full_batch, output_true_batch])
            d_on_g_losses.append(d_on_g_loss)
            print('batch {} d_on_g_loss : {}'.format(index + 1, d_on_g_loss))

            d.trainable = True
            if index % 300 == 0:  # save intermediate weights every 300 batches
                save_all_weights(d, g, epoch, int(index * 10))
            start += batch_size

        with open('log.txt', 'a') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses),
                                            np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
Code Example #27
train_images = train_images.reshape(-1, 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5

train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(len(train_images)).batch(batch_size)

def discriminator_loss(real_output, fake_output):
    real_loss = cross_entropy(tf.ones_like(real_output), real_output)
    fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
    total_loss = real_loss + fake_loss
    return total_loss

def generator_loss(fake_output):
    return cross_entropy(tf.ones_like(fake_output), fake_output)

generator = model.generator_model()
discriminator = model.discriminator_model()

cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)

train_generator_loss = tf.keras.metrics.Mean()
train_discriminator_loss = tf.keras.metrics.Mean()

generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
                                 discriminator_optimizer=discriminator_optimizer,
                                 generator=generator,
                                 discriminator=discriminator)
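
The snippet above builds the models, losses, metrics, optimizers, and checkpoint, but the training step itself is not shown. A minimal sketch of one tf.function training step that fits this setup (the latent size of 100 is an assumption):

noise_dim = 100  # assumed latent vector size expected by the generator

@tf.function
def train_step(images):
    noise = tf.random.normal([tf.shape(images)[0], noise_dim])

    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images, training=True)
        fake_output = discriminator(generated_images, training=True)

        gen_loss = generator_loss(fake_output)
        disc_loss = discriminator_loss(real_output, fake_output)

    gen_grads = gen_tape.gradient(gen_loss, generator.trainable_variables)
    disc_grads = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gen_grads, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(disc_grads, discriminator.trainable_variables))

    # track running means of the two losses
    train_generator_loss(gen_loss)
    train_discriminator_loss(disc_loss)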
Code Example #28
File: train.py Project: sukhad-app/text_d
def train_multiple_outputs(n_images, batch_size, epoch_num, critic_updates=5):
    #data = load_images('/home/turing/td/', n_images)
    y_train = sorted(glob.glob('/home/turing/td/data/*.png'))
    x_train = sorted(glob.glob('/home/turing/td/blur/*.png'))
    print('loaded_data')
    g = generator_model()
    g.load_weights('weights/424/generator_19_290.h5')
    d = discriminator_model()
    d.load_weights('weights/424/discriminator_19.h5')

    d_on_g = generator_containing_discriminator_multiple_outputs(g, d)

    d_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    d_on_g_opt = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)

    d.trainable = True
    d.compile(optimizer=d_opt, loss=wasserstein_loss)
    d.trainable = False
    loss = [perceptual_loss, wasserstein_loss]
    loss_weights = [100, 1]
    d_on_g.compile(optimizer=d_on_g_opt, loss=loss, loss_weights=loss_weights)
    d.trainable = True

    output_true_batch, output_false_batch = np.ones((batch_size, 1)), np.zeros(
        (batch_size, 1))

    for epoch in range(epoch_num):
        print('epoch: {}/{}'.format(epoch, epoch_num))
        print('batches: {}'.format(len(x_train) / batch_size))

        permutated_indexes = np.random.permutation(len(x_train))

        d_losses = []
        d_on_g_losses = []
        for index in range(int(len(x_train) / batch_size)):
            batch_indexes = permutated_indexes[index * batch_size:(index + 1) *
                                               batch_size]
            x_t = []
            y_t = []
            for i in batch_indexes:
                x_t.append(x_train[i])
                y_t.append(y_train[i])
            image_blur_batch = load_batch(x_t)
            image_full_batch = load_batch(y_t)

            generated_images = g.predict(x=image_blur_batch,
                                         batch_size=batch_size)

            for _ in range(critic_updates):
                d_loss_real = d.train_on_batch(image_full_batch,
                                               output_true_batch)
                d_loss_fake = d.train_on_batch(generated_images,
                                               output_false_batch)
                d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
                d_losses.append(d_loss)
            print('batch {} d_loss : {}'.format(index + 1, np.mean(d_losses)))

            d.trainable = False

            d_on_g_loss = d_on_g.train_on_batch(
                image_blur_batch, [image_full_batch, output_true_batch])
            d_on_g_losses.append(d_on_g_loss)
            print('batch {} d_on_g_loss : {}'.format(index + 1, d_on_g_loss))

            d.trainable = True

        with open('log.txt', 'a') as f:
            f.write('{} - {} - {}\n'.format(epoch, np.mean(d_losses),
                                            np.mean(d_on_g_losses)))

        save_all_weights(d, g, epoch, int(np.mean(d_on_g_losses)))
Code Example #29
File: train.py Project: jhayes14/GAN
def train(path, batch_size, EPOCHS):

    #reproducibility
    #np.random.seed(42)

    fig = plt.figure()

    # Get image paths
    print "Loading paths.."
    paths = glob.glob(os.path.join(path, "*.jpg"))
    print "Got paths.."

    # Load images
    IMAGES = np.array( [ load_image(p) for p in paths ] )
    np.random.shuffle( IMAGES )

    #IMAGES, labels = load_mnist(dataset="training", digits=np.arange(10), path=path)
    #IMAGES = np.array( [ np.array( [ scipy.misc.imresize(p, (64, 64)) / 256 ] * 3 ) for p in IMAGES ] )

    #np.random.shuffle( IMAGES )

    BATCHES = [ b for b in chunks(IMAGES, batch_size) ]

    discriminator = model.discriminator_model()
    generator = model.generator_model()
    discriminator_on_generator = model.generator_containing_discriminator(generator, discriminator)
    #adam_gen=Adam(lr=0.0002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    adam_gen=Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    adam_dis=Adam(lr=0.00002, beta_1=0.0005, beta_2=0.999, epsilon=1e-08)
    #opt = RMSprop()
    generator.compile(loss='binary_crossentropy', optimizer=adam_gen)
    discriminator_on_generator.compile(loss='binary_crossentropy', optimizer=adam_gen)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=adam_dis)

    print "Number of batches", len(BATCHES)
    print "Batch size is", batch_size

    #margin = 0.25
    #equilibrium = 0.6931
    inter_model_margin = 0.10

    for epoch in range(EPOCHS):
        print
        print "Epoch", epoch
        print

        # load weights on first try (i.e. if process failed previously and we are attempting to recapture lost data)
        if epoch == 0:
            if os.path.exists('generator_weights') and os.path.exists('discriminator_weights'):
                print "Loading saved weights.."
                generator.load_weights('generator_weights')
                discriminator.load_weights('discriminator_weights')
                print "Finished loading"
            else:
                pass

        for index, image_batch in enumerate(BATCHES):
            print "Epoch", epoch, "Batch", index

            Noise_batch = np.array( [ noise_image() for n in range(len(image_batch)) ] )
            generated_images = generator.predict(Noise_batch)
            #print generated_images[0][-1][-1]

            for i, img in enumerate(generated_images):
                rolled = np.rollaxis(img, 0, 3)
                cv2.imwrite('results/' + str(i) + ".jpg", np.uint8(255 * 0.5 * (rolled + 1.0)))

            Xd = np.concatenate((image_batch, generated_images))
            yd = [1] * len(image_batch) + [0] * len(image_batch) # labels

            print "Training first discriminator.."
            d_loss = discriminator.train_on_batch(Xd, yd)

            Xg = Noise_batch
            yg = [1] * len(image_batch)

            print "Training first generator.."
            g_loss = discriminator_on_generator.train_on_batch(Xg, yg)

            print "Initial batch losses : ", "Generator loss", g_loss, "Discriminator loss", d_loss, "Total:", g_loss + d_loss

            #print "equilibrium - margin", equilibrium - margin

            if g_loss < d_loss and abs(d_loss - g_loss) > inter_model_margin:
                #for j in range(handicap):
                while abs(d_loss - g_loss) > inter_model_margin:
                    print "Updating discriminator.."
                    #g_loss = discriminator_on_generator.train_on_batch(Xg, yg)
                    d_loss = discriminator.train_on_batch(Xd, yd)
                    print "Generator loss", g_loss, "Discriminator loss", d_loss
                    if d_loss < g_loss:
                        break
            elif d_loss < g_loss and abs(d_loss - g_loss) > inter_model_margin:
                #for j in range(handicap):
                while abs(d_loss - g_loss) > inter_model_margin:
                    print "Updating generator.."
                    #d_loss = discriminator.train_on_batch(Xd, yd)
                    g_loss = discriminator_on_generator.train_on_batch(Xg, yg)
                    print "Generator loss", g_loss, "Discriminator loss", d_loss
                    if g_loss < d_loss:
                        break
            else:
                pass

            print "Final batch losses (after updates) : ", "Generator loss", g_loss, "Discriminator loss", d_loss, "Total:", g_loss + d_loss
            print

            if index % 20 == 0:
                print 'Saving weights..'
                generator.save_weights('generator_weights', True)
                discriminator.save_weights('discriminator_weights', True)

        plt.clf()
        for i, img in enumerate(generated_images[:5]):
            i = i+1
            plt.subplot(3, 3, i)
            rolled = np.rollaxis(img, 0, 3)
            #plt.imshow(rolled, cmap='gray')
            plt.imshow(rolled)
            plt.axis('off')
        fig.canvas.draw()
        plt.savefig('Epoch_' + str(epoch) + '.png')