Code example #1
0
def extract_data():
    """
    Download and unpack the MNIST and CelebA datasets into DATA_DIR.

    Relies on the project-level `helper` module and the `DATA_DIR` constant
    being available in the enclosing module's scope.
    """
    # Fetch both datasets in the same order as before: MNIST first, then CelebA.
    for dataset_name in ('mnist', 'celeba'):
        helper.download_extract(dataset_name, DATA_DIR)
Code example #2
0
# Since the CelebA dataset is fairly complex and this is your first time using
# GANs, we want you to test your GAN model on the MNIST dataset first, so you
# can evaluate the performance of the model you build more quickly.
#
# If you are using [FloydHub](https://www.floydhub.com/), set `data_dir` to "/input" and use the [FloydHub data ID](http://docs.floydhub.com/home/using_datasets/) "R5KrjnANiKVhLWAkpXhNBe".

# In[1]:

data_dir = './data'

# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
#data_dir = '/input'
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper

# Download and unpack both datasets into data_dir (presumably a no-op when the
# data is already present -- TODO confirm against helper.download_extract).
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)

# ## Explore the Data
# ### MNIST
# [MNIST](http://yann.lecun.com/exdb/mnist/) is an image dataset of handwritten digits. You can change `show_n_images` to explore this dataset.

# In[2]:

# Number of sample images to display when previewing the dataset.
show_n_images = 25
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
get_ipython().run_line_magic('matplotlib', 'inline')
import os
from glob import glob
Code example #3
0
# If you are using [FloydHub](https://www.floydhub.com/), set `data_dir` to "/input" and use the [FloydHub data ID](http://docs.floydhub.com/home/using_datasets/) "R5KrjnANiKVhLWAkpXhNBe".

# In[88]:

data_dir = './data'

# FloydHub - Use with data ID "R5KrjnANiKVhLWAkpXhNBe"
#data_dir = '/input'


"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper

# Download and unpack both datasets into data_dir.
helper.download_extract('mnist', data_dir)
helper.download_extract('celeba', data_dir)


# ## Explore the Data
# ### MNIST
# [MNIST](http://yann.lecun.com/exdb/mnist/) is an image dataset of handwritten digits. You can change `show_n_images` to explore this dataset.

# In[90]:

# Number of sample images to display when previewing the dataset.
show_n_images = 25

"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# NOTE(review): `.magic(...)` is the legacy (Python 2 / old IPython) API; newer
# notebooks use run_line_magic('matplotlib', 'inline') as in the other examples.
get_ipython().magic(u'matplotlib inline')
Code example #4
0
import torchvision.utils as vutils
from torchvision.utils import save_image

import torch.nn as nn
import torch.nn.functional as F
import torch
#os.environ["CUDA_VISIBLE_DEVICES"] = "2"
# config = tf.compat.v1.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.6 # cap each GPU's memory usage below 60%
# config.gpu_options.visible_device_list = '2'
# session = tf.compat.v1.Session(config=config)#tf.compat.v1.keras.backend.set_session(session)
# NOTE(review): `sys` is not imported in this fragment -- this line raises
# NameError unless an earlier cell imported it; the bare expression also has
# no effect outside an interactive notebook (where it echoes the argv list).
sys.argv

import helper
data_dir = './data'
# NOTE(review): the other examples call download_extract(dataset_name, data_dir)
# with two arguments; verify this single-argument call matches this project's
# helper signature.
helper.download_extract(data_dir)


# In[21]:


# Prefer GPU index 2 when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')
print(device)


# In[22]:


# Dimensionality of the generator's latent noise vector z.
z_dim= 100
class Generator(nn.Module):
    def __init__(self):
Code example #5
0
File: GAN.py — Project: sidoff8/GANS
import mlflow
import tensorflow as tf
print(tf.__version__)
import helper
# Download and unpack the CelebA dataset into data/.
helper.download_extract('celeba', 'data/')


# In[2]:
import os
from glob import glob
import numpy as np
from matplotlib import pyplot
# Number of sample images to display in the preview grid below.
show_n_images = 25
data_dir='data/'

# Load the first `show_n_images` CelebA jpgs as 28x28 RGB images and render
# them in a single square grid for a quick visual sanity check.
celeba_images = helper.get_batch(glob(os.path.join(data_dir, 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
pyplot.imshow(helper.images_square_grid(celeba_images, 'RGB'))


# In[3]:


# Wrap the full set of CelebA jpgs in the project's Dataset helper for training.
celeba_dataset = helper.Dataset('celeba', glob(os.path.join(data_dir, 'img_align_celeba/*.jpg')))


# In[4]:


# Inspect the dataset's shape attribute (semantics defined by helper.Dataset
# -- TODO confirm whether this is (n_images, height, width, channels)).
celeba_dataset.shape
Code example #6
0
    # Run the project's unit tests against each model-building function before training.
    tests.test_model_inputs(model_inputs)
    tests.test_discriminator(discriminator, tf)
    tests.test_generator(generator, tf)
    tests.test_model_loss(model_loss)
    tests.test_model_opt(model_opt, tf)

    # Training parameters
    show_n_images = 25       # number of sample images to preview
    batch_size = 64
    z_dim = 100              # dimensionality of the generator's latent noise vector
    learning_rate = 0.00025
    beta1 = 0.45             # Adam beta1 -- presumably; confirm against model_opt
    epochs = 100 # due to time/hardware constraints, training was only run through the 11th epoch

    # Download the celeba.zip dataset
    helper.download_extract('celeba', './celeba')
    mnist_images = helper.get_batch(glob(os.path.join('./celeba', 'img_align_celeba/*.jpg'))[:show_n_images], 28, 28, 'RGB')
    pyplot.imshow(helper.images_square_grid(mnist_images, 'RGB')) # show one sample image grid

    # Convert the downloaded CelebA images into a dataset that can be trained on
    celeba_dataset = helper.Dataset('celeba', glob(os.path.join('./celeba', 'img_align_celeba/*.jpg')))

    # Check whether a GPU is available
    if not tf.test.gpu_device_name():
        warnings.warn('GPU가 없습니다. - GPU 사용 권장')
    else:
        print('사용중인 GPU: {}'.format(tf.test.gpu_device_name()))

    # Run the actual training and generate outputs (this takes a long time)
    with tf.Graph().as_default():
        train(epochs, batch_size, z_dim, learning_rate, beta1, celeba_dataset.get_batches, celeba_dataset.shape, celeba_dataset.image_mode)
Code example #7
0
File: WGAN.py — Project: Kaiyeh/VRDL2019
import helper as hp
import tensorflow as tf
import os
import numpy as np
import random
from PIL import Image
import tensorflow.keras.backend as K
from functools import partial

# Location of the extracted CelebA images.
path = '../dataset/img_align_celeba/'
# NOTE(review): other examples pass a dataset name as the first argument;
# verify this single-argument call matches this project's helper signature.
hp.download_extract('../dataset')

# Hyperparameters / model dimensions.
batch_size = 64
noise_size = 100   # dimensionality of the generator's input noise vector
sl5 = 4            # side length of the smallest feature map (4x4) -- presumably; confirm
img_size = 64
# Weight initializer: normal distribution with stddev 0.02.
rinit = tf.random_normal_initializer(stddev=0.02)

# Generator def
G = tf.keras.models.Sequential([
    tf.keras.layers.Dense(units = 512*sl5*sl5, input_dim=noise_size, kernel_initializer=rinit),
    tf.keras.layers.Reshape(target_shape=[sl5,sl5,512]),
    tf.keras.layers.BatchNormalization(),
    tf.keras.layers.LeakyReLU(),

    tf.keras.layers.UpSampling2D(),
    tf.keras.layers.Conv2D(filters=256, kernel_size=5, strides=1, padding='same', kernel_initializer=rinit),
    tf.keras.layers.BatchNormalization(momentum=0.8),
    tf.keras.layers.LeakyReLU(),

    tf.keras.layers.UpSampling2D(),