Example #1
    def __init__(self, sess, epoch, batch_size, z_dim, dataset_name,
                 checkpoint_dir, sample_dir, log_dir, mode):
        self.sess = sess
        self.epoch = epoch
        self.batch_size = batch_size
        self.checkpoint_dir = checkpoint_dir
        self.sample_dir = sample_dir
        self.log_dir = log_dir
        self.dataset_name = dataset_name
        self.z_dim = z_dim
        self.random_seed = 1000
        self.gamma = 10  #the scale of the gradient penalty
        self.lamb = 0.0002  #the scale of the distance metric used for adaptive margins

        if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':
            #image_dimension
            self.imgH = 28
            self.imgW = 28

            #the size of the first layer of generator
            self.s_size = 3
            #arguments for the last layer of generator
            self.last_dconv = {
                'kernel_size': [5, 5],
                'stride': 1,
                'padding': 'VALID'
            }
            #depths for convolution in generator and discriminator
            self.g_depths = [512, 256, 128, 64]
            self.d_depths = [64, 128, 256, 512]

            #channel
            self.c_dim = 1

            #WGAN parameters: the number of critic and generator iterations per epoch
            self.d_iters = 1
            self.g_iters = 1

            #train
            self.learning_rate = 0.0002
            self.beta1 = 0.5
            self.beta2 = 0.9

            #test, number of generated images to be saved
            self.sample_num = 100

            #load numpy array of images and labels
            self.images = load_mnist(self.dataset_name)

        elif dataset_name == 'anime':
            #image_dimension
            self.imgH = 64
            self.imgW = 64

            #the size of the first layer of generator
            self.s_size = 4
            #arguments for the last layer of generator; empty dict falls back to the general (default) settings
            self.last_dconv = {}

            #depths for convolution in generator and discriminator
            self.g_depths = [512, 256, 128, 64]
            self.d_depths = [64, 128, 256, 512]

            #channel dim
            self.c_dim = 3

            #WGAN parameters: the number of critic and generator iterations per epoch
            self.d_iters = 1
            self.g_iters = 1

            #train
            self.learning_rate = 0.0002
            self.beta1 = 0.5
            self.beta2 = 0.9

            #test, number of generated images to be saved
            self.sample_num = 64

            self.images = load_anime(self.dataset_name)

        else:
            raise NotImplementedError
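
A quick consistency check on these numbers: assuming the generator projects z to an s_size x s_size feature map with g_depths[0] channels, applies one stride-2 transposed convolution per remaining entry of g_depths, and finishes with the last_dconv layer, the spatial size reaches the target resolution (3 -> 6 -> 12 -> 24, then a kernel-5, stride-1, VALID deconv gives 28 for mnist; for anime the empty last_dconv presumably falls back to a kernel-5, stride-2, SAME deconv, giving 4 -> 8 -> 16 -> 32 -> 64). The standalone sketch below traces that arithmetic under those assumptions; deconv_out_size is a hypothetical helper, not part of the class.

def deconv_out_size(s, g_depths, last_kernel, last_stride, last_padding):
    # one stride-2 transposed convolution per depth after the first (projected) layer
    for _ in g_depths[1:]:
        s *= 2
    # final transposed convolution
    if last_padding == 'VALID':
        s = (s - 1) * last_stride + last_kernel
    else:  # 'SAME'
        s = s * last_stride
    return s

# mnist / fashion-mnist: 3 -> 6 -> 12 -> 24 -> 28
print(deconv_out_size(3, [512, 256, 128, 64], 5, 1, 'VALID'))  # 28
# anime (assumed default last layer: kernel 5, stride 2, SAME): 4 -> 8 -> 16 -> 32 -> 64
print(deconv_out_size(4, [512, 256, 128, 64], 5, 2, 'SAME'))   # 64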

Example #2

    def __init__(self, sess, epoch, batch_size, z_dim, dataset_name,
                 checkpoint_dir, sample_dir, log_dir, mode):
        self.sess = sess
        self.epoch = epoch
        self.batch_size = batch_size
        self.checkpoint_dir = checkpoint_dir
        self.sample_dir = sample_dir
        self.log_dir = log_dir
        self.dataset_name = dataset_name
        self.z_dim = z_dim
        self.random_seed = 1000

        if dataset_name == 'mnist' or dataset_name == 'fashion-mnist':
            #image_dimension
            self.imgH = 28
            self.imgW = 28

            #feature map size of the first layer of generator
            self.s_size = 7
            self.g_depth = 64  #first layer
            self.d_depths = [64, 128, 256, 512]
            self.num_layers = 2  #number of upsampling steps

            #channel and label dim
            self.c_dim = 1
            self.y_dim = 10

            self.d_iters = 2
            self.g_iters = 1

            self.learning_rate = 0.0002
            self.beta1 = 0.5
            self.beta2 = 0.999

            self.LAMBDA = 5
            self.BETA = 3

            #test, number of generated images to be saved
            self.sample_num = 100

            #load numpy array of images and labels
            self.images, self.labels = load_mnist(self.dataset_name)

        elif dataset_name == 'anime':
            #image_dimension
            self.imgH = 64
            self.imgW = 64

            #feature map size of the first layer of generator
            self.s_size = 16
            self.g_depth = 64  #first layer
            self.d_depths = [64, 128, 256, 512]
            self.num_layers = 2  #number of upsampling steps

            #channel and label dim
            self.c_dim = 3
            self.y_dim = 22

            self.d_iters = 2
            self.g_iters = 1

            self.learning_rate = 0.0002
            self.beta1 = 0.5
            self.beta2 = 0.999

            self.LAMBDA = 0.05
            self.BETA = 10

            #test, number of generated images to be saved
            self.sample_num = 64

            self.images, self.labels = load_anime(self.dataset_name)

        else:
            raise NotImplementedError
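
In this second variant the output resolution follows directly from s_size and num_layers: each upsampling step doubles the feature map, so imgH = imgW = s_size * 2**num_layers (7 * 4 = 28 for mnist/fashion-mnist, 16 * 4 = 64 for anime). A minimal check of that arithmetic, using a hypothetical helper name:

def output_size(s_size, num_layers):
    # each upsampling layer doubles the spatial size
    return s_size * (2 ** num_layers)

assert output_size(7, 2) == 28   # mnist / fashion-mnist
assert output_size(16, 2) == 64  # anime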