# `options` comes from the script's command-line parser (not shown above).
k = float(options.keep_prob)
l_rate = float(options.learn_rate)
grad = options.gradient

import get_data_from_minst  # project-local helper; "minst" spelling is the module's own
arr, lab = get_data_from_minst.get_data_from_minst()


data = arr

print(options)

from autoencoder import autoencoder
# `units` and `action` (layer sizes and activation names) are defined earlier in the script.
auto = autoencoder(units, action)

auto.generate_encoder(euris=options.euris)
auto.generate_decoder(symmetric=options.symm)

#auto.pre_train_rbm(data,n_iters=10,learning_rate=float(options.pre_learn_rate),adapt_learn=True)

if not options.batch:
    bat = None
else:
    from tools.data_manipulation.batch import seq_batch
    bat = seq_batch(data, int(options.n_batch))  # split data into n_batch sequential batches


auto.train(data,
           n_iters=int(options.iters),
           record_weight=True,
           w_file=options.w_file,
           use_dropout=True,
           keep_prob=k,
           reg_weight=False,
           reg_lambda=0.0,
           model_name=options.model_name,
           batch=bat,
           display=False,
           noise=True,
           gradient=grad,
           learning_rate=l_rate)
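
# Aside (not part of the original script): `use_dropout=True` with `keep_prob=k`
# presumably applies standard inverted dropout inside `auto.train`. A minimal
# NumPy sketch of that technique, under that assumption:
import numpy as np

def inverted_dropout(activations, keep_prob):
    # Zero each unit with probability 1 - keep_prob, then scale survivors by
    # 1/keep_prob so the expected activation matches test time.
    mask = (np.random.rand(*activations.shape) < keep_prob) / keep_prob
    return activations * mask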
Example #2
import autoencoder
import math
import numpy as np
from tools.data_manipulation import batch

data = np.loadtxt("../datasets/multipie_rand_sel_space.dat")

# Min-max rescale: shift by |min| and divide by the new max so all values
# land in [0, 1], then cast to float32.
data = data + abs(np.min(data))
data = data / np.max(data)
data = data.astype("float32")
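
# Toy check of the rescaling above (not part of the original snippet):
x = np.array([-2.0, 0.0, 6.0])
x = x + abs(np.min(x))  # -> [0.0, 2.0, 8.0]
x = x / np.max(x)       # -> [0.0, 0.25, 1.0]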
int_dim = 100  # bottleneck (code layer) size

bat = batch.seq_batch(data, 1000)  # 1000 sequential batches

#units = [data.shape[1],int(math.ceil(data.shape[1]*1.2))+5,int(max(math.ceil(data.shape[1]/4),int_dim+2)+3),
#         int(max(math.ceil(data.shape[1]/10),int_dim+1)),int_dim]

units = [5600, 2300, 1100, 600, 200, 100]  # layer widths: input dimension down to int_dim

act = ['sigmoid', 'sigmoid', 'sigmoid', 'sigmoid', 'sigmoid']
#act = ['relu','relu','relu','relu']
auto = autoencoder.autoencoder(units, act)

auto.generate_encoder()
auto.generate_decoder()

auto.pre_train(data,n_iters=5000)

session = auto.init_network()

ic, bc = auto.train(data,
                    batch=bat,
                    le=False,
                    tau=1.0,
                    session=session,
                    n_iters=2000,
                    display=False,
                    noise=False,
                    noise_level=0.015)
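
# Aside (not part of the original script): with `noise=True`, `auto.train`
# presumably corrupts its inputs by `noise_level` before reconstruction
# (a denoising-autoencoder setup). A sketch assuming additive Gaussian noise:
noisy = data + 0.015 * np.random.randn(*data.shape).astype("float32")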
Example #3
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tools.data_manipulation import batch

#import input_data
#mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
#n_samples = mnist.train.num_examples

data = np.load("w3.npy")
total_batch = 64  # number of batches
data_batch = batch.seq_batch(data, total_batch)

n_samples = data.shape[0]
batch_size = n_samples // total_batch  # samples per batch (floor division, matching Python 2 `/` on ints)

np.random.seed(0)
tf.set_random_seed(0)


def xavier_init(fan_in, fan_out, constant=1):
    # Xavier/Glorot uniform initialization: sampling in
    # +/- sqrt(6 / (fan_in + fan_out)) keeps activation variance roughly
    # constant across layers (Glorot & Bengio, 2010).
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))

    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high, dtype=tf.float32)

def norm_init(fan_in, fan_out, mean=0.005, stddev=0.1):
    # Alternative: Gaussian initialization with a small positive mean.
    return tf.random_normal((fan_in, fan_out), mean=mean, stddev=stddev, dtype=tf.float32)
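
# Example use (hypothetical shapes, not from the original snippet): a TF1-style
# weight variable initialized with either scheme.
W = tf.Variable(xavier_init(784, 200))
# W = tf.Variable(norm_init(784, 200))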

class VariationalAutoencoder(object):