Example #1
    def fit(self, x, y, iterations=1, shuffle=True):
        data = as_matrix(x)
        response = numpy.asarray(y)  # ensure fancy indexing works when shuffling

        self.layers[0] = nn.layer(len(data[0]), bias=self.layers[0].bias)

        # initialize the hidden layers with stacked autoencoders
        if self.pretrain:
            out = x
            for index in range(1, len(self.layers) - 1):
                layer = self.layers[index]
                trained = autoencoder(len(layer.visible))
                trained.fit(out)
                out = trained.predict(out)
                layer.weights = trained.layers[1].weights

        errors = []
        for iteration in range(iterations):
            if shuffle:
                inds = numpy.random.permutation(len(data))
                data = data[inds]
                response = response[inds]

            for row_ind, row in enumerate(data):
                # propagate forward
                self.propagate_forward(row)

                # propagate backward -- the input weights never change
                layer = self.layers[-1]
                target = response[row_ind]
                errors.append(sum(target - layer.visible))

                # do hidden layers
                for layer_index in range(len(self.layers) - 1, 0, -1):
                    layer = self.layers[layer_index]
                    input_layer = self.layers[layer_index - 1]
                    target = layer.propagate_backward(input_layer, target)

        return errors
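
# Illustrative sketch (not this library's code): a minimal, self-contained
# version of the greedy layer-wise pretraining idea in the self.pretrain
# branch above. Each hidden layer is initialized from a tied-weight
# autoencoder trained on the previous layer's output; all sizes and
# hyperparameters here are made up for illustration.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def pretrain_layer(data, n_hidden, lr=0.1, epochs=50, seed=0):
    rng = np.random.default_rng(seed)
    W = rng.normal(scale=0.1, size=(data.shape[1], n_hidden))
    for _ in range(epochs):
        h = _sigmoid(data @ W)               # encode
        recon = _sigmoid(h @ W.T)            # decode with tied weights
        d_recon = (recon - data) * recon * (1 - recon)
        d_h = (d_recon @ W) * h * (1 - h)
        W -= lr * (data.T @ d_h + d_recon.T @ h) / len(data)
    return W, _sigmoid(data @ W)

out = np.random.rand(100, 8)
for n_hidden in (6, 4):                      # successive hidden layer sizes
    W, out = pretrain_layer(out, n_hidden)   # feed each layer the last output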
Example #2
def __autoencoder_mnist__():
    X_train, y_train, X_val, y_val, X_test, y_test = load_mnist()
    X_train = X_train.reshape(X_train.shape[0], -1)
    X_val   = X_val.reshape(X_val.shape[0], -1)
    X_test  = X_test.reshape(X_test.shape[0], -1)    
    
    print('Train data shape: ', X_train.shape)
    print('Train labels shape: ', y_train.shape)
    print('Test data shape: ', X_test.shape)
    print('Test labels shape: ', y_test.shape)
    print('')
    
    ninput  = 28*28
    nhidden = 100 

    net = autoencoder(layer_units=(ninput, nhidden, ninput), bias=False, act_func = 'sigmoid', 
                      loss_type='euclidean', seed=12)
    tic = time.time()
    stats = net.train_with_SGD(X_train, learning_rate=0.1, learning_rate_decay=0.95, reg=0.001, 
                               num_iters=2000, batchsize=128, mu=0.9)
    toc = time.time()
    print(toc - tic, 'sec elapsed')
    print('overall loss: ', net.loss(X_train, reg=0.01, opt='test'))
    plot_net_output(net, stats, X_train)
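
# Illustrative sketch (an assumption, not this net's internals): the
# 'euclidean' loss_type above presumably means mean squared reconstruction
# error, optionally with an L2 weight-decay term controlled by reg.
import numpy as np

def euclidean_loss(x, x_hat, weights=(), reg=0.0):
    data_loss = 0.5 * np.sum((x_hat - x) ** 2) / x.shape[0]
    reg_loss = 0.5 * reg * sum(np.sum(W ** 2) for W in weights)
    return data_loss + reg_loss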
Example #3

from autoencoder import autoencoder
import numpy as np

units = [784, 10]
action = ['softplus']

l_rate = 0.01

grad = "gradient"

# idx3-ubyte files start with a 16-byte header (magic, count, rows, cols);
# open in binary mode and skip it before reading the raw pixel bytes
f = open("../datasets/train-images.idx3-ubyte", "rb")
f.read(16)
arr = np.fromfile(f, '>u1', 60000 * 28 * 28).reshape((60000, 784))
max_value = 0xFF

# center and scale pixel values to [-0.5, 0.5]
arr = arr.astype(float)
arr -= max_value / 2.0
arr /= max_value

data = arr

print(data.shape)

auto = autoencoder(units, action)

auto.generate_encoder()
auto.generate_decoder()

auto.train(data, n_iters=1000, batch=None, display=False, noise=False,
           gradient=grad, learning_rate=l_rate)
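
# Companion sketch: the 16-byte header skipped above holds four big-endian
# 32-bit integers (magic 2051 for idx3-ubyte, image count, rows, cols), so
# the array shape can be derived from the file instead of hard-coded.
import struct

with open("../datasets/train-images.idx3-ubyte", "rb") as fh:
    magic, count, rows, cols = struct.unpack(">IIII", fh.read(16))
    assert magic == 2051, "not an idx3-ubyte image file"
    images = np.fromfile(fh, '>u1', count * rows * cols).reshape(count, rows * cols)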

Example #4
def train_ae(self):
    ae = autoencoder(self.myVanHat)
    return ae
Example #5
import numpy as np
from autoencoder import autoencoder

data = np.random.rand(100,20).astype("float32")

auto = autoencoder([20, 15, 10], ['sigmoid', 'sigmoid'])

auto.generate_encoder(euris=True)
auto.generate_decoder(symmetric=False)

auto.pre_train(data)

auto.train(data, n_iters=20, record_weight=True, reg_weight=False,
           learning_rate=20.0, reg_lambda=1.0, batch=None, display=False,
           noise=False)
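
# Hedged sketch of the assumed meaning of the symmetric flag above: a
# symmetric decoder ties its weights to the encoder transpose (no new
# parameters), while symmetric=False gives the decoder its own matrices.
W_enc = np.random.rand(20, 15).astype("float32")
W_dec_tied = W_enc.T                                    # symmetric decoder
W_dec_free = np.random.rand(15, 20).astype("float32")   # untied decoder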
Example #6
import numpy as np
import scipy.optimize

numTrain = round(len(labeledSet) / 2)
trainSet = labeledSet[:numTrain]    # first half for training
testSet = labeledSet[numTrain:]     # second half for testing
trainData = train_set[0][trainSet, :]
trainLabels = train_set[1][trainSet]
testData = train_set[0][testSet, :]
testLabels = train_set[1][testSet]

# unlabeled
unlabeledSet = np.where(train_set[1] >= 5)[0]
unlabeledData = train_set[0][unlabeledSet, :]


############# train
iterations = 200
model = autoencoder(input_layer_size, hidden_layer_size, beta, rho, lambd)
input_data = unlabeledData.T
theta = scipy.optimize.minimize(
    model.autoencoder_Cost_Grad,
    x0=model.theta,
    args=(input_data,),
    method="L-BFGS-B",
    jac=True,
    options={"maxiter": iterations},
)


W1 = theta.x[0 : model.W1_dim].reshape(hidden_layer_size, input_layer_size)
b1 = theta.x[model.W1_dim + model.W2_dim:
             model.W1_dim + model.W2_dim + model.b1_dim].reshape(hidden_layer_size, 1)

showHiddenIMAGES(W1, patch_size, hidden_patch_size)
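
# Hedged sketch of unpacking all four parameter blocks from the flat
# solution vector, assuming the layout [W1, W2, b1, b2] implied by the
# offsets used above (b1 starts at W1_dim + W2_dim).
x, o = theta.x, 0
W1 = x[o:o + model.W1_dim].reshape(hidden_layer_size, input_layer_size)
o += model.W1_dim
W2 = x[o:o + model.W2_dim].reshape(input_layer_size, hidden_layer_size)
o += model.W2_dim
b1 = x[o:o + model.b1_dim].reshape(hidden_layer_size, 1)
o += model.b1_dim
b2 = x[o:].reshape(input_layer_size, 1)                  # remainder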
Example #7
import numpy as np

import autoencoder
import batch

# shift and scale the data into [0, 1]
data = data + abs(np.min(data))
data = data / np.max(data)
data = data.astype("float32")
int_dim = 100

bat = batch.seq_batch(data, 1000)

#units = [data.shape[1],int(math.ceil(data.shape[1]*1.2))+5,int(max(math.ceil(data.shape[1]/4),int_dim+2)+3),
#         int(max(math.ceil(data.shape[1]/10),int_dim+1)),int_dim]

units = [5600,2300,1100,600,200,100]

act = ['sigmoid','sigmoid','sigmoid','sigmoid','sigmoid']
#act = ['relu','relu','relu','relu']
auto = autoencoder.autoencoder(units,act)

auto.generate_encoder()
auto.generate_decoder()

auto.pre_train(data,n_iters=5000)

session = auto.init_network()

ic, bc = auto.train(data, batch=bat, le=False, tau=1.0, session=session,
                    n_iters=2000, display=False, noise=False, noise_level=0.015)

print('Init: ', ic, ' Best:', bc)

mid = auto.get_hidden(data,session=session)

out = auto.get_output(data,session=session)
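
# Hedged sketch of what the noise/noise_level options above suggest: a
# denoising autoencoder corrupts its input with small Gaussian noise and
# learns to reconstruct the clean data (an assumption about train()).
noisy = data + np.random.normal(0.0, 0.015, data.shape).astype("float32")
noisy = np.clip(noisy, 0.0, 1.0)    # stay within the normalized range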
Example #8
import numpy
from autoencoder import autoencoder
from encoder import encoder

if __name__ == "__main__":
	### autoencoder parameter
	iteration = 100
	learning = 0.005
	x_length = 1000
	y_length = 100
	prob = 0.0
	filepath = 'lr.txt'
	w_path = 'lr/h1_1000to100_w.txt'
	bias_path = 'lr/h1_1000to100_eb.txt'
	readpath1 = 'lr.txt'
	readpath2 = 'lt.txt'
	### stacked denoising autoencoder
	print('sda start')
	ob1 = autoencoder(iteration, learning, x_length, y_length, filepath, prob)
	ob1.sda()
	ob1.encfile(readpath1)
	ob1.encfile(readpath2)
	print('autoencoder finish')
	
"""
	print('encode start')
	ob2 = encoder(w_path, bias_path)
	ob2.encfile(readpath1)
	ob2.encfile(readpath2)
	print('finish')
"""
Example #9
from autoencoder import autoencoder
import tensorflow
import numpy as np

auto = autoencoder([3, 5, 2], ['softplus', 'softplus'])

auto.generate_encoder(euris=True)
auto.generate_decoder(symmetric=False)

data = np.random.rand(20, 3).astype("float32")

s = auto.init_network()

# first hidden layer weights before pretraining
print(s.run(auto.layers[1].W))

par = auto.pre_train_rbm(data, learning_rate=0.0001)

print(par[2])
# the same weights after RBM pretraining
print(auto.session.run(auto.layers[1].W))
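
# Hedged sketch of one step of CD-1 contrastive divergence, the standard
# update behind RBM pretraining such as pre_train_rbm (generic algorithm,
# biases omitted; not necessarily this class's exact implementation).
def cd1_step(W, v0, lr=1e-4, rng=np.random.default_rng(0)):
    sig = lambda z: 1.0 / (1.0 + np.exp(-z))
    h0 = sig(v0 @ W)                                  # positive phase
    h_sample = (rng.random(h0.shape) < h0).astype("float32")
    v1 = sig(h_sample @ W.T)                          # reconstruction
    h1 = sig(v1 @ W)                                  # negative phase
    return W + lr * (v0.T @ h0 - v1.T @ h1) / len(v0)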

Example #10
import gzip
import pickle
import numpy as np
import scipy.optimize

hidden_layer_size_1 = 200    # hidden layer 1 size
hidden_layer_size_2 = 200    # hidden layer 2 size

rho = 0.1    # desired average activation of the hidden units
beta = 3     # weight of the sparsity penalty term

####### load data
f = gzip.open('/Users/wanyanxie/Summer_2014_Study/Deep_Learning/mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
f.close()

####debug
#train_set= (train_set[0][1:50],train_set[1][1:50])
#######  Train the first sparse autoencoder
iterations = 200
model_1 = autoencoder(input_layer_size, hidden_layer_size_1, beta, rho, lambd)
theta_1 = scipy.optimize.minimize(model_1.autoencoder_Cost_Grad, x0=model_1.theta,
                                  args=(train_set[0].T,),
                                  method='L-BFGS-B',
                                  jac=True,
                                  options={'maxiter': iterations})

W1_1 = theta_1.x[0:model_1.W1_dim].reshape(hidden_layer_size_1, input_layer_size)
b1_1 = theta_1.x[model_1.W1_dim + model_1.W2_dim:
                 model_1.W1_dim + model_1.W2_dim + model_1.b1_dim].reshape(hidden_layer_size_1, 1)

#######  Train the second sparse autoencoder
iterations = 200
feedForward_train_1 = feedForwardAutoencoder(W1_1,b1_1, train_set[0].T)
a2_train = feedForward_train_1.hidden_layer_activiation()

model_2 = autoencoder(hidden_layer_size_1, hidden_layer_size_2, beta, rho, lambd)
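
# Minimal sketch of the standard sparse-autoencoder penalty the beta/rho
# parameters above correspond to: a KL divergence pushing the mean hidden
# activation rho_hat toward the sparsity target rho (assumed, not read
# from this autoencoder class).
def sparsity_penalty(rho_hat, rho=0.1, beta=3.0):
    return beta * np.sum(rho * np.log(rho / rho_hat)
                         + (1 - rho) * np.log((1 - rho) / (1 - rho_hat)))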
Example #11
#plt.imshow(Temp,interpolation='nearest')
#plt.show()
#################################################################################
##### In effect there are 4 squares per image; when the text criterion shows strong resemblance, the 4 images of each are merged, and for the test we take the parameters of the closest set.
# For each square, keep the future high-level features at the local level: 4 squares and 48 features, i.e. dimension 4*3*4*4 = 192
Sample_Feature_train = np.zeros((X_train.shape[0],size_Feature_Autocodeur))
Sample_Feature_val = np.zeros((X_val.shape[0],size_Feature_Autocodeur))

type_LvsG = 2    # local (vs. global)
for Rang_Image in range(Size_train):
    if (Rang_Image % 50 == 0):
        print('Feature Creation for image :',Rang_Image)
    Index_Image = Rang_Image
    #X_Sub_Image = V_X_Carre_train[:,0:Index_Image+1,:,:,:]
    #Y_Sub_Image = V_Y_Carre_train[:,0:Index_Image+1,:,:,:]
    Auto_encoder_local = autoencoder(size_input_Local, Size_Encode_L, Size_Epoch,
                                     BATCH_SIZE_Enc,
                                     V_X_Carre_train[:, Index_Image, :, :, :],
                                     V_Y_Carre_train[:, Index_Image, :, :, :],
                                     type_LvsG, size_Feature_Kernel)
    Auto_encoder_local.build_network(X_d,X_g)
    Auto_encoder_local.learn(V_X_Carre_train[:,Index_Image,:,:,:],V_Y_Carre_train[:,Index_Image,:,:,:])
    V_encode = Auto_encoder_local.encode((V_X_Carre_train[:,Index_Image,:,:,:]).astype('float32'))
    V_encode = np.reshape(V_encode,size_Feature_Autocodeur)
    #V_encode += 1
    #V_encode *= (255/2)
    #V_encode = np.uint8(V_encode)
    # leave the values between -1 and 1
    Sample_Feature_train[Index_Image,:] = V_encode
                        
for Rang_Image in range(Size_val):
    if (Rang_Image % 50 == 0):
        print('Feature Creation for image :',Rang_Image)
    Index_Image = Rang_Image
    #X_Sub_Image = V_X_Carre_train[:,0:Index_Image+1,:,:,:]