Example #1
File: my_utils.py  Project: wangmn93/VaDE
def loadFullFashion_MNSIT(shift):
    # Load the Fashion-MNIST train and test splits and merge them into one array pair.
    # Assumes the module-level imports of my_utils.py: numpy as np and data_mnist as data.
    imgs, y, num_train_data = data.mnist_load('Fashion_MNIST', shift=shift)
    imgs_t, y_t, num_train_data_t = data.mnist_load('Fashion_MNIST',
                                                    dataset='test',
                                                    shift=shift)
    # Append a channel axis in place: (N, 28, 28) -> (N, 28, 28, 1).
    imgs.shape = imgs.shape + (1,)
    imgs_t.shape = imgs_t.shape + (1,)
    return np.concatenate((imgs, imgs_t)), np.concatenate((y, y_t))
def getFashion_MNISTDatapool(batch_size, shift, keep=None):
    # Build a batched in-memory pool over the Fashion-MNIST training split,
    # optionally keeping only the classes listed in `keep`.
    if keep is None:
        imgs, y, num_train_data = data.mnist_load('fmnist', shift=shift)
    else:
        imgs, y, num_train_data = data.mnist_load('fmnist', keep=keep, shift=shift)
    print("Total number of training samples: " + str(num_train_data))
    imgs.shape = imgs.shape + (1,)
    data_pool = utils.MemoryData({'img': imgs, 'label': y}, batch_size)
    return data_pool
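For reference, a minimal usage sketch of the two helpers above (assuming my_utils is importable and the Fashion-MNIST files are where data.mnist_load expects them; the shape in the comment is the expected one, not verified output, and keep=[0, 9] is just an example):

import my_utils

# All Fashion-MNIST images (train + test) with their labels.
imgs, labels = my_utils.loadFullFashion_MNSIT(shift=True)
print(imgs.shape)  # expected: (70000, 28, 28, 1)

# A batched pool over the training split, restricted to two classes.
pool = my_utils.getFashion_MNISTDatapool(batch_size=64, shift=True, keep=[0, 9])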
Example #3
def getMNISTDatapool(batch_size, keep=None, shift=True):
    # Build a batched in-memory pool over the MNIST training split (images only),
    # optionally keeping only the classes listed in `keep`.
    if keep is None:
        imgs, _, num_train_data = data.mnist_load('MNIST_data', shift=shift)
    else:
        imgs, _, num_train_data = data.mnist_load('MNIST_data',
                                                  keep=keep,
                                                  shift=shift)
    print("Total number of training samples: " + str(num_train_data))
    imgs.shape = imgs.shape + (1,)
    data_pool = utils.MemoryData({'img': imgs}, batch_size)
    return data_pool
def getFullFashion_MNISTDatapool(batch_size, shift, keep=None):
    # Same as above, but pools the concatenated train + test splits together
    # with their labels.
    if keep is None:
        imgs, y, num_train_data = data.mnist_load('fmnist', shift=shift)
        imgs_t, y_t, num_train_data_t = data.mnist_load('fmnist', dataset='test', shift=shift)
    else:
        imgs, y, num_train_data = data.mnist_load('fmnist', keep=keep, shift=shift)
        imgs_t, y_t, num_train_data_t = data.mnist_load('fmnist', keep=keep, dataset='test', shift=shift)
    print("Total number of samples (train + test): " + str(num_train_data + num_train_data_t))
    imgs.shape = imgs.shape + (1,)
    imgs_t.shape = imgs_t.shape + (1,)
    data_pool = utils.MemoryData({'img': np.concatenate((imgs, imgs_t)),
                                  'label': np.concatenate((y, y_t))}, batch_size)
    return data_pool
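utils.MemoryData itself does not appear in these snippets, so its exact interface is unknown here; as a hypothetical stand-in showing what an in-memory batch pool boils down to (not the project's actual class or API):

import numpy as np

def batch_iter(arrays, batch_size):
    # One epoch: shuffle once, then yield aligned mini-batches from every array.
    n = len(next(iter(arrays.values())))
    order = np.random.permutation(n)
    for start in range(0, n - batch_size + 1, batch_size):
        idx = order[start:start + batch_size]
        yield {key: value[idx] for key, value in arrays.items()}

# usage: for batch in batch_iter({'img': imgs, 'label': y}, 64): ...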
import tensorflow as tf
import data_mnist as data
import models_mnist as models
import utils  # project utils module; provides mkdir() and MemoryData used below


""" param """
epoch = 50
batch_size = 64
lr = 0.0002
z_dim = 100
gpu_id = 3

''' data '''
utils.mkdir('./data/mnist/')
data.mnist_download('./data/mnist')
imgs, _, _ = data.mnist_load('./data/mnist')
imgs.shape = imgs.shape + (1,)
data_pool = utils.MemoryData({'img': imgs}, batch_size)


""" graphs """
with tf.device('/gpu:%d' % gpu_id):
    ''' models '''
    generator = models.generator
    discriminator = models.discriminator

    ''' graph '''
    # inputs
    real = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])
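The graph snippet stops right after the placeholders. As a rough TF1-style sketch of how such placeholders are usually fed during training (the train ops d_step/g_step and the batch real_ipt are illustrative names of my own, not from this project):

import numpy as np

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One training step: fresh noise for `z`, a real image batch for `real`.
    z_ipt = np.random.normal(size=[batch_size, z_dim]).astype(np.float32)
    # real_ipt = ...  (a (batch_size, 28, 28, 1) array drawn from data_pool)
    # sess.run(d_step, feed_dict={real: real_ipt, z: z_ipt})
    # sess.run(g_step, feed_dict={z: z_ipt})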
Example #6
#test code snippet here
import data_mnist as data
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
import my_utils

#extracting subset of mnist
if 0:
    imgs, labels, _ = data.mnist_load('MNIST_data')  #one-hot = False

    for l in range(10):
        print(labels[l])

    keep = [0, 9]
    X, Y = [], []
    for x, y in zip(imgs, labels):
        if y in keep:
            X.append(x)
            Y.append(y)
    X = np.array(X)
    Y = np.array(Y)

    for l in range(10):
        img = np.reshape(X[l], [28, 28])
        plt.imshow(img, cmap='gray')
        print(Y[l])
        plt.show()

#convert labels to one-hot
if 0:
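The one-hot conversion block under this last `if 0:` is cut off by the snippet boundary; purely as an illustration of the idea (not the project's code), integer labels 0-9 can be one-hot encoded with a NumPy identity matrix:

import numpy as np

labels = np.array([3, 0, 9])      # integer class ids
one_hot = np.eye(10)[labels]      # shape (3, 10), a single 1 per row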
Example #7
    # `partial` is functools.partial; `fc` is a fully-connected layer helper
    # defined elsewhere in the project (not shown in this snippet).
    relu = tf.nn.relu
    fc_relu = partial(fc, activation_fn=relu)

    real = tf.placeholder(tf.float32, shape=[None, 784])
    with tf.variable_scope("encoder", reuse=False):
        y = fc_relu(real, 500, scope='layer1')

    with tf.variable_scope("encoder", reuse=True):
        weights = tf.get_variable('layer1/weights')
        biases = tf.get_variable('layer1/biases')
    print(weights.shape)
    print(biases.shape)
    a = 0

if 0:
    imgs, _, _ = data.mnist_load('MNIST_data')
    a = 0

if 0:
    # Toy shapes for the GMM prior parameters (presumably mixture weights theta_p,
    # component means u_p, and component variances lambda_p, as in VaDE).
    batch_size = 100
    z_dim = 10
    n_cen = 5
    z = tf.get_variable('z', shape=(batch_size, z_dim))
    theta_p = tf.get_variable('theta_p', shape=(n_cen))
    u_p = tf.get_variable('u_p', shape=(n_cen, z_dim))
    lambda_p = tf.get_variable('lambda_p', shape=(n_cen, z_dim))

    theta_p_tensor = tf.expand_dims(theta_p, 0)                       # (1, n_cen)
    theta_p_tensor = tf.expand_dims(theta_p_tensor, 2)                # (1, n_cen, 1)
    theta_p_tensor = tf.tile(theta_p_tensor, [batch_size, 1, 1])      # (batch_size, n_cen, 1)
    u_p_tensor = tf.tile(tf.expand_dims(u_p, 0), [batch_size, 1, 1])  # (batch_size, n_cen, z_dim)
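    # (Added sketch, not from the repo.) The expand/tile pattern above broadcasts the
    # GMM prior parameters to a per-sample, per-component layout; giving z and lambda_p
    # the matching shape lets per-component Gaussian terms be computed elementwise:
    lambda_p_tensor = tf.tile(tf.expand_dims(lambda_p, 0), [batch_size, 1, 1])  # (batch_size, n_cen, z_dim)
    z_tensor = tf.tile(tf.expand_dims(z, 1), [1, n_cen, 1])                     # (batch_size, n_cen, z_dim)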