Ejemplo n.º 1
0
    # NOTE(review): this fragment starts inside an if-branch whose header
    # (likely `if image_dir == 'mnist':`) is outside this view; `mnist`,
    # `trainx`, and `channel` are presumably bound there — confirm.
    trainy = mnist.train._labels.astype(np.int32)
    testx = mnist.test._images
    testy = mnist.test._labels.astype(np.int32)
    # Map pixels to [-1, 1] (assumes the loader yields values in [0, 1] —
    # TODO confirm against input_data.read_data_sets).
    trainx = 2 * trainx - 1
    testx = 2 * testx - 1
    trainx = np.reshape(trainx, (-1, 28, 28, channel))
    testx = np.reshape(testx, (-1, 28, 28, channel))
    # Pad 28x28 images to 32x32 using -1, the post-rescale background value.
    npad = ((0, 0), (2, 2), (2, 2), (0, 0))
    trainx = np.pad(trainx,
                    pad_width=npad,
                    mode='constant',
                    constant_values=-1)
    testx = np.pad(testx, pad_width=npad, mode='constant', constant_values=-1)
elif image_dir == 'svhn':
    channel = 3
    # rescale() is defined elsewhere in this file — presumably normalizes
    # raw pixels; verify its layout contract before relying on it here.
    trainx, trainy = svhn_data.load('./data/svhn', 'train')
    testx, testy = svhn_data.load('./data/svhn', 'test')
    trainx = rescale(trainx)
    testx = rescale(testx)
else:
    channel = 3
    trainx, trainy = cifar10_data.load("./data/cifar10", subset='train')
    testx, testy = cifar10_data.load("./data/cifar10", subset='test')
    # Axis move [0, 2, 3, 1]: presumably NCHW -> NHWC — confirm loader layout.
    trainx = np.transpose(trainx, [0, 2, 3, 1])
    testx = np.transpose(testx, [0, 2, 3, 1])

# Sanity check: shape and value range of the selected dataset.
print(trainx.shape)
print(np.max(trainx), np.min(trainx))


def generator_conv(z, reuse=False):
Ejemplo n.º 2
0
parser.add_argument('--learning_rate', type=float, default=0.0003)
args = parser.parse_args()
# NOTE(review): eval() on a CLI string executes arbitrary code — unsafe for
# untrusted input; consider ast.literal_eval or an explicit bool parser.
args.balance = eval(args.balance)
print(args)

# fixed random seeds
# Two independent streams: rng_data (seed_data) presumably drives data
# selection/shuffling; rng (seed) seeds the theano/lasagne RNGs — confirm.
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))

# load SVHN data
def rescale(mat):
    """Normalize raw SVHN pixels to [-1, 1] floats and transpose to NCHW.

    The SVHN .mat loader appears to yield (H, W, C, N) uint8 images —
    TODO confirm; the transpose then produces (N, C, H, W) arrays of
    dtype ``th.config.floatX``.
    """
    # ``np.cast`` was removed in NumPy 2.0; ``astype`` is the supported
    # spelling and computes identical values (arithmetic promotes to
    # float64 first, exactly as before, then casts to floatX).
    scaled = ((-127.5 + mat) / 127.5).astype(th.config.floatX)
    return np.transpose(scaled, (3, 2, 0, 1))

# NOTE(review): the "training" set is carved out of the SVHN *test* split,
# with labels read from a separate file (args.labels) instead of ground
# truth — presumably predicted/pseudo labels; confirm intent.
datax, datay = svhn_data.load(args.data_dir,'test')
datax = rescale(datax)
trainx = datax[0:10000]
# Two independent copies for two unlabeled data streams.
trainx_unl = trainx.copy()
trainx_unl2 = trainx.copy()
trainy = np.load(args.labels)
trainy = trainy[0:10000]
# Ground-truth labels for the same 10000 examples, kept separately —
# presumably to score the loaded labels; confirm at the use site.
trainy_true = datay.astype(np.int32)[:10000]
nr_batches_train = int(trainx.shape[0]/args.batch_size)

# Examples beyond the first 10000 form the held-out evaluation set.
testx = datax[10000:]
testy = datay.astype(np.int32)[10000:]
# ceil() so the final partial batch is still evaluated.
nr_batches_test = int(np.ceil(float(testx.shape[0])/args.batch_size))

# specify generative model
noise_dim = (args.batch_size, 100)
print(args)

# fixed random seeds
# NOTE(review): seeds and rescale are re-initialized again below — this
# looks like two scripts concatenated; verify before deduplicating.
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2**15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2**15)))


# load SVHN data
def rescale(mat):
    """Scale raw SVHN pixels to [-1, 1] and transpose to NCHW floatX.

    Input appears to be (H, W, C, N) — the standard SVHN .mat layout
    (TODO confirm); output is (N, C, H, W) in ``th.config.floatX``.
    """
    # np.cast was removed in NumPy 2.0; .astype produces the same values
    # (the arithmetic still promotes to float64 before the cast).
    normalized = ((-127.5 + mat) / 127.5).astype(th.config.floatX)
    return np.transpose(normalized, (3, 2, 0, 1))


# Load and normalize both SVHN splits; keep two independent copies of the
# training set for the unlabeled streams.
trainx, trainy = svhn_data.load(args.data_dir, 'train')
testx, testy = svhn_data.load(args.data_dir, 'test')
trainx = rescale(trainx)
testx = rescale(testx)
trainx_unl = trainx.copy()
trainx_unl2 = trainx.copy()
# Training drops the final partial batch; testing keeps it via ceil().
nr_batches_train = int(trainx.shape[0] / args.batch_size)
nr_batches_test = int(np.ceil(float(testx.shape[0]) / args.batch_size))

# input layers
# Generator input: per-batch uniform noise of shape (batch_size, 100);
# discriminator input: NCHW 32x32 RGB images with free batch dimension.
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
x_input = ll.InputLayer(shape=(None, 3, 32, 32))
z_input = ll.InputLayer(shape=noise_dim, input_var=noise)

# specify generative model
Ejemplo n.º 4
0
batch_size_eval = 200


'''
data
'''
if valid_flag:
    # Validation mode: carve 5000 examples off as a validation split and
    # evaluate on it instead of the test set.
    def rescale(mat):
        # Normalize to [-1, 1] floats and reshape flat rows to
        # (N, in_channels) + dim_input (presumably (N, C, H, W) — confirm).
        return (((-127.5 + mat) / 127.5).astype(theano.config.floatX)).reshape((-1, in_channels) + dim_input)
    train_x, train_y, valid_x, valid_y, eval_x, eval_y = load_svhn_small(data_dir, num_val=5000)
    eval_x = valid_x
    eval_y = valid_y
else:
    # Full-dataset mode: loader appears to yield (H, W, C, N); normalize
    # to [-1, 1] and transpose to NCHW.
    def rescale(mat):
        # np.cast was removed in NumPy 2.0; .astype gives identical values.
        return np.transpose(((-127.5 + mat) / 127.5).astype(theano.config.floatX), (3, 2, 0, 1))
    train_x, train_y = svhn_data.load('/home/chongxuan/mfs/data/svhn/', 'train')
    eval_x, eval_y = svhn_data.load('/home/chongxuan/mfs/data/svhn/', 'test')

train_y = np.int32(train_y)
eval_y = np.int32(eval_y)
train_x = rescale(train_x)
eval_x = rescale(eval_x)
x_unlabelled = train_x.copy()

# Fixed: was a Python 2 print statement (a SyntaxError on Python 3).
print(train_x.shape, eval_x.shape)

# Deterministic shuffle driven by ssl_data_seed for reproducible splits.
rng_data = np.random.RandomState(ssl_data_seed)
inds = rng_data.permutation(train_x.shape[0])
train_x = train_x[inds]
train_y = train_y[inds]
x_labelled = []
Ejemplo n.º 5
0
# Generation settings: sample one row of generated images per class
# (z_generated = num_classes); generation_scale presumably toggles output
# rescaling for visualization — confirm where it is consumed.
generation_scale = True
z_generated = num_classes
# evaluation
# Visualize and evaluate every epoch.
vis_epoch = 1
eval_epoch = 1
'''
data
'''


def rescale(mat):
    """Normalize raw SVHN pixels to [-1, 1] and transpose to NCHW.

    Input appears to be (H, W, C, N) uint8 (standard SVHN .mat layout —
    TODO confirm); output is (N, C, H, W) in ``theano.config.floatX``.
    """
    # np.cast was removed in NumPy 2.0; .astype computes the same values
    # (arithmetic promotes to float64 first, exactly as before).
    scaled = ((-127.5 + mat) / 127.5).astype(theano.config.floatX)
    return np.transpose(scaled, (3, 2, 0, 1))


# Load both SVHN splits from the local ./svhn/ directory and normalize
# them with the rescale() helper defined above.
train_x, train_y = svhn_data.load('./svhn/', 'train')
eval_x, eval_y = svhn_data.load('./svhn/', 'test')

train_y = np.int32(train_y)
eval_y = np.int32(eval_y)
train_x = rescale(train_x)
eval_x = rescale(eval_x)
# The unlabeled pool is an independent copy of the full training set.
x_unlabelled = train_x.copy()

# Fixed: was a Python 2 print statement (a SyntaxError on Python 3).
print(train_x.shape, eval_x.shape)

# Deterministic shuffle driven by ssl_data_seed for reproducible
# labeled/unlabeled splits.
rng_data = np.random.RandomState(ssl_data_seed)
inds = rng_data.permutation(train_x.shape[0])
train_x = train_x[inds]
train_y = train_y[inds]
x_labelled = []
Ejemplo n.º 6
0
    # NOTE(review): fragment starts inside a guard — presumably
    # `if not os.path.exists(ckpt_dir):` — whose header is outside this view.
    os.makedirs(ckpt_dir)
if not os.path.exists(log_dir):
    os.makedirs(log_dir)

# MNIST: keep all training images (validation_size=0), cast labels to
# int32, and reshape flat rows to NHWC with one channel.
mnist = input_data.read_data_sets('./data/mnist', validation_size=0)
trainx = mnist.train._images
mnist_trainY = mnist.train._labels.astype(np.int32)
testx = mnist.test._images
mnist_testY = mnist.test._labels.astype(np.int32)
mnist_trainX = np.reshape(trainx, (-1, 28, 28, 1))
mnist_testX = np.reshape(testx, (-1, 28, 28, 1))
# npad = ((0, 0), (2, 2), (2, 2), (0, 0))
# mnist_trainX = np.pad(mnist_trainX, pad_width=npad, mode='constant', constant_values=0)
# mnist_testX = np.pad(mnist_testX, pad_width=npad, mode='constant', constant_values=0)

# SVHN: rescale() is defined elsewhere in this file — presumably
# normalizes raw pixels; verify its layout contract against this usage.
trainx, svhn_trainY = svhn_data.load('./data/svhn', 'train')
testx, svhn_testY = svhn_data.load('./data/svhn', 'test')
svhn_trainX = rescale(trainx)
svhn_testX = rescale(testx)
# Three bandwidth/weight constants — TODO confirm semantics at the use site.
sigma1 = 100
sigma2 = 100
sigma3 = 100


def classifier(images,
               inp,
               is_training=True,
               num_classes=10,
               reuse=False,
               scope=None):
Ejemplo n.º 7
0
# Dataset-specific configuration: generator output nonlinearity, class
# count, input geometry, and a matching rescale() preprocessor.
if args.dataset == 'svhn' or args.dataset == 'cifar10':
    gen_final_non = ln.tanh
    num_classes = 10
    dim_input = (32, 32)
    in_channels = 3
    colorImg = True
    generation_scale = True
    if args.dataset == 'svhn':

        def rescale(mat):
            # Normalize to [-1, 1] and transpose — appears to map the SVHN
            # .mat layout (H, W, C, N) to NCHW; confirm loader output.
            return np.transpose(
                np.cast[theano.config.floatX]((-127.5 + mat) / 127.5),
                (3, 2, 0, 1))

        import svhn_data
        eval_x, eval_y = svhn_data.load('./svhn/', 'test')
        eval_y = np.int32(eval_y)
        eval_x = rescale(eval_x)

    else:

        def rescale(mat):
            # CIFAR-10: cast to floatX only — presumably the loader already
            # emits the desired range/layout; confirm.
            return np.cast[theano.config.floatX](mat)

        import cifar10_data
        eval_x, eval_y = cifar10_data.load('./cifar10/', 'test')
        eval_y = np.int32(eval_y)
        eval_x = rescale(eval_x)

elif args.dataset == 'mnist':
    gen_final_non = ln.sigmoid
    # NOTE(review): the rest of the mnist branch is missing — the fragment
    # is truncated at this point.
parser.add_argument('--learning_rate', type=float, default=0.0003)
parser.add_argument('--data_dir', type=str, default='/home/tim/data')
args = parser.parse_args()
print(args)

# fixed random seeds
# rng_data (seed_data) presumably selects/shuffles data; rng (seed) drives
# model initialization via the theano/lasagne RNG streams — confirm.
rng_data = np.random.RandomState(args.seed_data)
rng = np.random.RandomState(args.seed)
theano_rng = MRG_RandomStreams(rng.randint(2 ** 15))
lasagne.random.set_rng(np.random.RandomState(rng.randint(2 ** 15)))

# load SVHN data
def rescale(mat):
    """Normalize raw SVHN pixels to [-1, 1] and transpose to NCHW.

    The SVHN .mat loader appears to yield (H, W, C, N) — TODO confirm;
    output is (N, C, H, W) in ``th.config.floatX``.
    """
    # np.cast was removed in NumPy 2.0; .astype computes identical values
    # (the arithmetic promotes to float64 before the cast, as before).
    scaled = ((-127.5 + mat) / 127.5).astype(th.config.floatX)
    return np.transpose(scaled, (3, 2, 0, 1))

# Load and normalize both SVHN splits.
trainx, trainy = svhn_data.load(args.data_dir,'train')
testx, testy = svhn_data.load(args.data_dir,'test')
trainx = rescale(trainx)
testx = rescale(testx)
# Independent copy used as the unlabeled stream.
trainx_unl = trainx.copy()
# Training drops the final partial batch; testing keeps it via ceil().
nr_batches_train = int(trainx.shape[0]/args.batch_size)
nr_batches_test = int(np.ceil(float(testx.shape[0])/args.batch_size))

# specify generative model
# DCGAN-style generator: 100-d uniform noise -> dense 4*4*512 -> reshape to
# (N, 512, 4, 4) -> 5x5 deconvolutions doubling spatial size (4->8->16),
# each hidden layer batch-normalized (g=None) with relu.
# NOTE(review): the fragment is truncated after the 16x16 layer.
noise_dim = (args.batch_size, 100)
noise = theano_rng.uniform(size=noise_dim)
gen_layers = [ll.InputLayer(shape=noise_dim, input_var=noise)]
gen_layers.append(nn.batch_norm(ll.DenseLayer(gen_layers[-1], num_units=4*4*512, W=Normal(0.05), nonlinearity=nn.relu), g=None))
gen_layers.append(ll.ReshapeLayer(gen_layers[-1], (args.batch_size,512,4,4)))
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,256,8,8), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 4 -> 8
gen_layers.append(nn.batch_norm(nn.Deconv2DLayer(gen_layers[-1], (args.batch_size,128,16,16), (5,5), W=Normal(0.05), nonlinearity=nn.relu), g=None)) # 8 -> 16