Example #1
def show_generator_output(sess, n_images, z_input, out_channel_dim,
                          image_mode):
    """
    Show example output for the generator
    :param sess: TensorFlow session
    :param n_images: Number of Images to display
    :param z_input: Input Z Tensor
    :param out_channel_dim: The number of channels in the output image
    :param image_mode: The mode to use for images ("RGB" or "L")
    """
    cmap = None if image_mode == 'RGB' else 'gray'  # choose grayscale or color display

    z_dim = np.shape(z_input)[-1]
    example_z = np.random.uniform(-1, 1, size=(n_images, z_dim))
    #print(np.shape(example_z),'example_z')
    #test=generator(example_z, out_channel_dim, is_train=False,reuse=True)

    result = sess.run(
        generator(example_z, out_channel_dim, is_train=False,
                  reuse=True))  # is_train=False means inference, not training; reuse=True reuses the generator variables
    #feed_dict={input_z: example_z})

    images_grid = helper.images_square_grid(result, image_mode)
    pyplot.imshow(images_grid, cmap=cmap)
    pyplot.show()
Example #2
def output_fig(images_array, file_name="./results"):
    # the shape of your images_array should be (9, width, height, 3),
    # 28 <= width, height <= 112
    plt.figure(figsize=(6, 6), dpi=100)
    plt.imshow(helper.images_square_grid(images_array))
    plt.axis("off")
    plt.savefig(file_name + '.png', bbox_inches='tight', pad_inches=0)
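# A usage sketch for output_fig above: a random array stands in for generated
# images, and the file name is illustrative (this assumes the one-argument
# helper.images_square_grid used in this example).
import numpy as np

fake_batch = np.random.uniform(-1, 1, size=(9, 64, 64, 3))  # (9, width, height, 3), 28 <= width, height <= 112
output_fig(fake_batch, file_name="sample_001")              # writes sample_001.png to the working directory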
Example #3
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode, image_path, save, show):
    """
    Show example output for the generator
    :param sess: TensorFlow session
    :param n_images: Number of Images to display
    :param input_z: Input Z Tensor
    :param out_channel_dim: The number of channels in the output image
    :param image_mode: The mode to use for images ("RGB" or "L")
    :param image_path: Path to save the image
    """
    cmap = None if image_mode == 'RGB' else 'gray'
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(
        generator(input_z, out_channel_dim, False),
        feed_dict={input_z: example_z})

    images_grid = helper.images_square_grid(samples, image_mode)
    
    if save:
        # Save image
        images_grid.save(image_path, 'JPEG')
    
    if show:
        plt.imshow(images_grid, cmap=cmap)
        plt.show()
def view_sample(gen_samples, data_image_mode):
    mosaic = helper.images_square_grid(gen_samples, data_image_mode)
    if data_image_mode == 'L':
        pyplot.imshow(mosaic, cmap='gray')
    else:
        pyplot.imshow(mosaic)
    pyplot.show()
Example #5
def display_examples(n_images):
    """
    Display an example of MNIST & CelebA
    :param n_images: number of images to display
    """

    show_n_images = n_images

    get_ipython().magic('matplotlib inline')

    mnist_images = helper.get_batch(glob(os.path.join(DATA_DIR, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
    plt.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')
    plt.show()

    mnist_images = helper.get_batch(glob(os.path.join(DATA_DIR, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
    plt.imshow(helper.images_square_grid(mnist_images, 'RGB'))
    plt.show()
def output_fig(images_array, file_name="./results"):
    plt.figure(figsize=(6, 6), dpi=100)
    print(images_array.shape)
    #plt.imshow(np.transpose(images_array.cpu().detach().numpy(),(1, 2, 0)))
    plt.imshow(helper.images_square_grid(images_array))
    plt.axis("off")
    plt.savefig(file_name + '.png', bbox_inches='tight', pad_inches=0)
    plt.close('all')
Example #7
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
    cmap = None if image_mode == 'RGB' else 'gray'
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(
        generator(input_z, out_channel_dim, False),
        feed_dict={input_z: example_z})

    images_grid = helper.images_square_grid(samples, image_mode)
    return images_grid
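# Because this variant returns the grid instead of plotting it, the caller can
# save it to disk. A self-contained sketch of that step, using a dummy batch in
# place of real generator samples (helper.images_square_grid is assumed to
# return a PIL image, as it does in Example #3):
import numpy as np
import helper

dummy_samples = np.random.uniform(-1, 1, size=(9, 28, 28, 3))  # stand-in for sess.run(generator(...))
grid = helper.images_square_grid(dummy_samples, 'RGB')
grid.save('generator_preview.png')                             # file name is illustrative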
def show_generator_output(sess, n_images, input_z, out_channel_dim):
    """
    Show example output for the generator
    """
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(generator(input_z, out_channel_dim, False),
                       feed_dict={input_z: example_z})
    pyplot.imshow(helper.images_square_grid(samples))
    pyplot.show()
    def train(self, epochs, batch_size=256):

        steps = 0
        noise_fix = np.random.normal(0, 1, (9, self.latent_dim))

        valid = np.ones((batch_size, 1))
        fake = np.zeros((batch_size, 1))

        for epoch in range(epochs):
            for batch_images in celeba_dataset.get_batches(batch_size):

                batch_images *= 2  # rescale from [-0.5, 0.5] (helper output) to [-1, 1] for a tanh generator

                noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
                gen_imgs = self.generator.predict(noise)
                out_noise = np.random.normal(0, 1, (9, self.latent_dim))
                out_imgs = self.generator.predict(out_noise)

                d_loss_real = self.discriminator.train_on_batch(
                    batch_images, valid)
                d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
                d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

                if steps % 100 == 0:
                    fixed_img = self.generator.predict(noise_fix)
                    plt.imshow(helper.images_square_grid(out_imgs))
                    plt.show()
                    plt.imshow(helper.images_square_grid(fixed_img))
                    plt.show()

                g_loss = self.combined.train_on_batch(noise, valid)

                print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" %
                      (steps, d_loss[0], 100 * d_loss[1], g_loss))
                steps += 1
                print("epoch = ", epoch)

            epoch += 1
def show_generator_output(sess, n_images, input_z, out_channel_dim,
                          image_mode):
    """
    Show example output for the generator
    :param sess: TensorFlow session
    :param n_images: Number of Images to display
    :param input_z: Input Z Tensor
    :param out_channel_dim: The number of channels in the output image
    :param image_mode: The mode to use for images ("RGB" or "L")
    """
    cmap = None if image_mode == 'RGB' else 'gray'
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(generator(input_z, out_channel_dim, False),
                       feed_dict={input_z: example_z})

    if cmap == 'gray':
        pyplot.imshow(np.array(helper.images_square_grid(samples, image_mode)),
                      cmap=cmap)  # apply the 'gray' colormap computed above
        pyplot.show()
    else:
        images_grid = helper.images_square_grid(samples, image_mode)
        pyplot.imshow(images_grid, cmap=cmap)
        pyplot.show()
Example #11
def show_generator_output(sess, n_images, input_z, out_channel_dim, image_mode):
    """
    Show example output for the generator
    :param sess: TensorFlow session
    :param n_images: Number of Images to display
    :param input_z: Input Z Tensor
    :param out_channel_dim: The number of channels in the output image
    :param image_mode: The mode to use for images ("RGB" or "L")
    """
    cmap = None if image_mode == 'RGB' else 'gray'
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(
        generator(input_z, out_channel_dim, False),
        feed_dict={input_z: example_z})

    images_grid = helper.images_square_grid(samples, image_mode)
    pyplot.imshow(images_grid, cmap=cmap)
    pyplot.show()
Example #12
def show_generator_output(sess, n_images, input_z, out_channel_dim,
                          image_mode):

    cmap = None if image_mode == 'RGB' else 'gray'
    z_dim = input_z.get_shape().as_list()[-1]
    example_z = np.random.uniform(-1, 1, size=[n_images, z_dim])

    samples = sess.run(generator(input_z, out_channel_dim, False),
                       feed_dict={input_z: example_z})
    # print(samples.shape)

    # The lines below visualize training progress; comment them out to skip plotting.
    images_grid = helper.images_square_grid(samples, image_mode)
    pyplot.imshow(images_grid, cmap=cmap)
    pyplot.axis('off')
    pyplot.show()
    return images_grid
Example #13
def output_fig(self, images_array, file_name):
    plt.figure(figsize=(6, 6), dpi=100)
    plt.imshow(helper.images_square_grid(images_array))
    plt.axis("off")
    plt.savefig(file_name + '.png', bbox_inches='tight', pad_inches=0)
Example #14
# [MNIST](http://yann.lecun.com/exdb/mnist/) is a dataset of handwritten digit images. You can change `show_n_images` to explore this dataset.

# In[2]:

show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
get_ipython().run_line_magic('matplotlib', 'inline')
import os
from glob import glob
from matplotlib import pyplot

mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')

# ### CelebA
# The [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) contains more than 200,000 celebrity images with annotations. Since you will be generating faces, you won't need the annotations. You can change `show_n_images` to explore this dataset.

# In[3]:

show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28,
    28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
show_n_images = 25
data_dir = './data'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
get_ipython().run_line_magic('matplotlib', 'inline')
import helper
import os
from glob import glob
from matplotlib import pyplot
import numpy as np

mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(np.array(helper.images_square_grid(mnist_images, 'L')))

# ### CelebA
# The [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset contains over 200,000 celebrity images with annotations.  Since you're going to be generating faces, you won't need the annotations.  You can view the first number of examples by changing `show_n_images`.

# In[3]:

show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28,
    28, 'RGB')
helper.images_square_grid(mnist_images, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
Example #16
# [MNIST](http://yann.lecun.com/exdb/mnist/) is a dataset of handwritten digit images. You can change `show_n_images` to explore this dataset.

# In[90]:

show_n_images = 25

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
get_ipython().magic(u'matplotlib inline')
import os
from glob import glob
from matplotlib import pyplot

mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')


# ### CelebA
# The [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) contains more than 200,000 celebrity images with annotations. Since you will be generating faces, you won't need the annotations. You can change `show_n_images` to explore this dataset.

# In[91]:

show_n_images = 25

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
mnist_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))
Example #17
def output_fig(images_array, file_name):
    plt.figure(figsize=(6, 6), dpi=100)
    plt.imshow(helper.images_square_grid(images_array))
    plt.axis("off")
    plt.savefig(file_name + ".jpg", bbox_inches="tight", pad_inches=0)
    plt.close()
Example #18
import helper
import os
from glob import glob
import numpy as np
from matplotlib import pyplot
from distutils.version import LooseVersion
import warnings
import tensorflow as tf
import problem_unittests as tests

show_n_images = 40
celeb_images = helper.get_batch(
    glob(os.path.join('img_align_celeba/*.jpg'))[:show_n_images], 50, 50,
    'RGB')
pyplot.imshow(helper.images_square_grid(celeb_images, 'RGB'))

# pyplot.show()

# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion(
    '1.0'
), 'Please use TensorFlow version 1.0 or newer.  You are using {}'.format(
    tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))

if not tf.test.gpu_device_name():
    warnings.warn(
        'No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
Example #19
File: WGAN.py    Project: Kaiyeh/VRDL2019
            # Train generator
            nd_noise = np.random.normal(0,1,(batch_size, noise_size))
            # nd_noise = np.random.uniform(-1, 1, (batch_size, noise_size))
            gac_sum += G_train.train_on_batch(nd_noise, mones)
            accnt += 1
            if(bcnt%4992==0):
                print('Batch no.', bcnt)
                print('Discriminator ave real loss:', dacr_sum / accnt / n_critic)
                print('Discriminator ave fake loss:', dacf_sum / accnt / n_critic)
                print('Discriminator ave loss:', (dacr_sum[0] + dacf_sum[0]) / accnt / n_critic)
                print('Generator ave loss: %f %f' % tuple(gac_sum / accnt))

                nd_noise = np.random.normal(0, 1, (batch_size, noise_size))
                # nd_noise = np.random.uniform(-1, 1, (batch_size, noise_size))
                fake_img = G.predict(nd_noise)
                hp.images_square_grid(fake_img).save('outputs/' + 'iter-' + str(epch) + '-' + str(int(bcnt/9984)) + '.png')

                if(bcnt%(9984*10)==0):
                    D.save_weights('./weights/D/D' + '-iter-' + str(epch) + '-' + str(bcnt/(9984*10)))
                    G.save_weights('./weights/G/G' + '-iter-' + str(epch) + '-' + str(bcnt/(9984*10)))
                    with open('log.txt','a') as w:
                        w.write('Iter-' + str(epch) + '-' + str(bcnt/(9984*10)) + '\n')
                        w.write(str(dacr_sum / accnt / n_critic) + '\n')
                        w.write(str(dacf_sum / accnt / n_critic) + '\n')
                        w.write(str((dacr_sum[0] + dacf_sum[0]) / accnt / n_critic) + '\n')
                        w.write(str(gac_sum / accnt) + '\n')
                    print('Model saved!')

                accnt = 0
                dacr_sum = np.zeros(2)
                dacf_sum = np.zeros(2)
Example #20
import os, time, multiprocessing
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from glob import glob
from data import get_celebA, flags
from model import get_generator, get_discriminator
import helper
import matplotlib.pyplot as plt

plt.switch_backend('agg')

if __name__ == '__main__':
    G = get_generator([None, flags.z_dim])
    G.load_weights('checkpoint/G12.npz')
    plt.figure(figsize=(6, 6), dpi=100)
    for i in range(500):
        z = np.random.normal(loc=0.0, scale=1.0,
                             size=[9, flags.z_dim]).astype(np.float32)
        G.eval()
        result = G(z)
        G.train()
        plt.imshow(helper.images_square_grid(result.numpy()))
        plt.axis("off")
        imagePath = '{}/{:003d}_image.png'.format(flags.sample_dir, i + 1)
        if os.path.exists(imagePath):
            continue
        plt.savefig(imagePath, bbox_inches='tight', pad_inches=0)
mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'mnist/*.jpg'))[:show_n_images], 28, 28, 'L')
#pyplot.imshow(helper.images_square_grid(mnist_images, 'L'), cmap='gray')

# ### CelebA
# The [CelebFaces Attributes Dataset (CelebA)](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html) dataset contains over 200,000 celebrity images with annotations.  Since you're going to be generating faces, you won't need the annotations.  You can view the first number of examples by changing `show_n_images`.

# In[3]:

show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
mnist_images = helper.get_batch(
    glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28,
    28, 'RGB')
pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB'))

# ## Preprocess the Data
# Since the project's main focus is on building the GAN, we'll preprocess the data for you.  The values of the MNIST and CelebA datasets will be in the range of -0.5 to 0.5, on 28x28 images.  The CelebA images will be cropped to remove parts of the image that don't include a face, then resized down to 28x28.
#
# The MNIST images are black and white images with a single [color channel](https://en.wikipedia.org/wiki/Channel_(digital_image%29) while the CelebA images have [3 color channels (RGB color channel)](https://en.wikipedia.org/wiki/Channel_(digital_image%29#RGB_Images).
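# A minimal sketch of the scaling described above, on a dummy uint8 batch (the
# array name and shape here are illustrative, not part of the project code):
import numpy as np

example_batch = np.random.randint(0, 256, size=(9, 28, 28, 3)).astype(np.float32)
scaled = example_batch / 255.0 - 0.5   # values in [-0.5, 0.5], matching the batch generator excerpted below
tanh_ready = scaled * 2.0              # rescaled to [-1, 1] for a tanh generator output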
# ## Build the Neural Network
# You'll build the components necessary to build a GAN by implementing the following functions below:
# - `model_inputs`
# - `discriminator`
# - `generator`
# - `model_loss`
# - `model_opt`
# - `train`
#
# ### Check the Version of TensorFlow and Access to GPU
    IMAGE_MAX_VALUE = 255

    current_index = 0
    while current_index + batch_size <= shape[0]:
        data_batch = get_batch(
            data_files[current_index:current_index + batch_size], *shape[1:3])

        current_index += batch_size

        yield data_batch / IMAGE_MAX_VALUE - 0.5


#%%
test_images = get_batch(glob(os.path.join(data_dir, '*.jpg'))[:10], 32, 32)

pyplot.imshow(helper.images_square_grid(test_images))

#%%

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()


def model_inputs(image_width, image_height, image_channels, z_dim):
    """
    Create the model inputs
    """
    inputs_real = tf.placeholder(tf.float32,
                                 shape=(None, image_width, image_height,
                                        image_channels),
                                 name='input_real')
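    # The original snippet is cut off here. The lines below are a hedged sketch of
    # how the remaining inputs are commonly defined in this TF1-style setup; the
    # placeholder names 'input_z' and 'lr' are assumptions, not from the source.
    inputs_z = tf.placeholder(tf.float32, shape=(None, z_dim), name='input_z')
    learning_rate = tf.placeholder(tf.float32, name='lr')

    return inputs_real, inputs_z, learning_rate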
Example #23
def show_images(ids):
    show_n_images = 16
    celeb_images = helper.get_batch(ids[:show_n_images], 28, 28, 'RGB')
    plt.imshow(helper.images_square_grid(celeb_images, 'RGB'))
if not tf.test.gpu_device_name():
    warnings.warn(
        'No GPU found. Please use a GPU to train your neural network.')
else:
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8)

sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))

import keras as K
K.backend.clear_session()

# In[ ]:

if __name__ == '__main__':
    bgan = DCGAN()
    bgan.train(epochs=30, batch_size=128)

# In[5]:

for i in range(500):
    noise = np.random.normal(0, 1, (9, 100))
    plt.imshow(helper.images_square_grid(bgan.generator.predict(noise)))
    plt.axis("off")
    plt.savefig("./data/face_images_2/ %d_image.png" % i)
    plt.show()

# In[6]:

bgan.train(epochs=10, batch_size=128)

# In[ ]:
Example #25
                           kernel_initializer=rinit)
])

if __name__ == '__main__':
    G.summary()

    lo_it = 3
    lo_ba = 0
    pfix = '-iter-' + str(lo_it) + '-' + str(lo_ba) + '.0'
    try:
        G.load_weights('./weights/G/G' + pfix)
    except:
        print('G weights not found!')
        input()

    G.summary()
    for i in range(500):
        nd_noise = np.random.normal(0, 1, (batch_size, noise_size))
        #genimg = ( * 127.5) + 127.5
        gridimg = hp.images_square_grid(G.predict(nd_noise))
        gridimg.save('imggen/' + ('%03d' % i) + '_image.png')
    '''
    img_size = 64
    batch_size = 9
    path = '../dataset/img_align_celeba/'
    dir = np.array([path + fname for fname in os.listdir(path)])
    select = np.random.randint(0, dir.shape[0], batch_size)
    real_image = (hp.get_batch(dir[select], img_size, img_size, 'RGB') - 127.5) / 127.5
    imgg = hp.images_square_grid(real_image)
    imgg.save('grid.png')
    '''