示例#1
0
def _download():
    """
    Fetch the MNIST dataset, downloading it if it is not already present.
    :return: The train, test and validation set.
    """
    dataset_path = os.path.join(env_paths.get_data_path("mnist"), "mnist.pkl.gz")
    tr_x, tr_t, va_x, va_t, te_x, te_t = load_mnist_realval(dataset_path)
    # Note the return order: train, test, then validation.
    return (tr_x, tr_t), (te_x, te_t), (va_x, va_t)
示例#2
0
    mean: (batch_size, num_latent)
    logvar: (batch_size, num_latent)
    '''
    mean = mean.dimshuffle(0,'x','x',1)
    logvar = logvar.dimshuffle(0,'x','x',1)
    return c - logvar/2 - (x - mean)**2 / (2 * T.exp(logvar))

def standard_normal(x):
    """Elementwise log-density of a standard normal, using the shared constant c: c - x^2 / 2."""
    return c - (x * x) / 2

def bernoullisample(x):
    """Draw one Bernoulli sample per entry of x (treated as probabilities), cast to floatX."""
    draws = np.random.binomial(1, x, size=x.shape)
    return draws.astype(theano.config.floatX)


### LOAD DATA AND SET UP SHARED VARIABLES
# load_mnist_realval is a project helper returning real-valued MNIST splits.
train_x, train_t, valid_x, valid_t, test_x, test_t = load_mnist_realval()
# Fold the validation images into the training set; labels are unused below.
train_x = np.concatenate([train_x,valid_x])
num_features=train_x.shape[-1]  # flattened pixels per image

# Binarize once up-front and place the data in shared (GPU-capable) storage.
# NOTE(review): binarization is fixed here, not re-sampled per epoch — confirm intended.
sh_x_train = theano.shared(np.asarray(bernoullisample(train_x), dtype=theano.config.floatX), borrow=True)
sh_x_test = theano.shared(np.asarray(bernoullisample(test_x), dtype=theano.config.floatX), borrow=True)

#dummy test data for testing the implementation
X = np.ones((batch_size,784),dtype='float32')  # 784 = 28*28, presumably flattened MNIST

### MODEL SETUP
# Recognition model q(z|x): two stacked dense encoder layers.
# batch_size, nhidden and nonlin_enc are defined elsewhere in this file.
l_in = lasagne.layers.InputLayer((None, num_features))
l_enc_h1 = lasagne.layers.DenseLayer(l_in, num_units=nhidden, name='ENC_DENSE1', nonlinearity=nonlin_enc)
l_enc_h1 = lasagne.layers.DenseLayer(l_enc_h1, num_units=nhidden, name='ENC_DENSE2', nonlinearity=nonlin_enc)
示例#3
0
# Generation / regularization hyper-parameters used further down in this file.
generation_scale = False
z_generated = num_classes  # one generated sample per class; num_classes defined elsewhere
weight_d2 = 1e-4
weight_d3 = 1e-4
# evaluation
vis_epoch = 10   # visualize every 10 epochs
eval_epoch = 1   # evaluate every epoch
print('alpha_stage1:', alpha_stage1)
print('alpha_stage2:', alpha_stage2)
print('pre_num_epoch:', pre_num_epoch)
print('num_epochs:', num_epochs)
print('optim_flag:', optim_flag)
'''
data
'''
# Load real-valued MNIST; load_mnist_realval is a project helper.
train_x, train_y, valid_x, valid_y, eval_x, eval_y = load_mnist_realval(
    data_dir)
if valid_flag:
    # Model-selection mode: evaluate on the validation split.
    eval_x = valid_x
    eval_y = valid_y
else:
    # Final-training mode: merge validation into training, evaluate on test.
    train_x = np.concatenate([train_x, valid_x])
    train_y = np.hstack((train_y, valid_y))
train_y = np.int32(train_y)
eval_y = np.int32(eval_y)
train_x = train_x.astype('float32')
eval_x = eval_x.astype('float32')
x_unlabelled = train_x.copy()  # unlabelled pool, decoupled from later shuffles of train_x

# Deterministic shuffle of the training images with a fixed seed.
rng_data = np.random.RandomState(1)
inds = rng_data.permutation(train_x.shape[0])
train_x = train_x[inds]
# NOTE(review): train_y is NOT permuted in this visible chunk — presumably
# `train_y = train_y[inds]` follows; verify labels stay aligned with images.
示例#4
0
# Path of the training log inside the results directory.
logfile = os.path.join(results_out, 'logfile.log')

#SYMBOLIC VARS
sym_x = T.matrix()       # mini-batch of flattened inputs
sym_lr = T.scalar('lr')  # learning rate, supplied at call time

#Helper functions
def bernoullisample(x):
    """Sample each entry of x (a probability) from a Bernoulli; cast to floatX."""
    sampled = np.random.binomial(1, x, size=x.shape)
    return sampled.astype(theano.config.floatX)


### LOAD DATA
if dataset is 'sample':
    print "Using real valued MNIST dataset to binomial sample dataset after every epoch "
    train_x, train_t, valid_x, valid_t, test_x, test_t = load_mnist_realval()
    del train_t, valid_t, test_t
    preprocesses_dataset = bernoullisample
else:
    print "Using fixed binarized MNIST data"
    train_x, valid_x, test_x = load_mnist_binarized()
    preprocesses_dataset = lambda dataset: dataset  #just a dummy function

#concatenate train and validation set
train_x = np.concatenate([train_x, valid_x])

train_x = train_x.astype(theano.config.floatX)
test_x = test_x.astype(theano.config.floatX)

nfeatures = train_x.shape[1]
n_train_batches = train_x.shape[0] / batch_size