def setup(self, X, num_centers, alpha, save_to='dec_model'):
    # Hold out the last 10% of X as a validation split.
    sep = X.shape[0] * 9 // 10
    X_train = X[:sep]
    X_val = X[sep:]
    ae_model = AutoEncoderModel(self.xpu, [X.shape[1], 500, 500, 2000, 10],
                                pt_dropout=0.2)
    if not os.path.exists(save_to + '_pt.arg'):
        ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1,
                                    decay=0.0,
                                    lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
        ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                          lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
        ae_model.save(save_to + '_pt.arg')
        logging.log(logging.INFO,
                    "Autoencoder Training error: %f" % ae_model.eval(X_train))
        logging.log(logging.INFO,
                    "Autoencoder Validation error: %f" % ae_model.eval(X_val))
    else:
        ae_model.load(save_to + '_pt.arg')
    self.ae_model = ae_model
    # Attach the DEC clustering loss on top of the encoder.
    self.dec_op = DECModel.DECLoss(num_centers, alpha)
    label = mx.sym.Variable('label')
    self.feature = self.ae_model.encoder
    self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
    self.args.update({k: v for k, v in self.ae_model.args.items()
                      if k in self.ae_model.encoder.list_arguments()})
    # Cluster centers, to be filled in later (e.g. by k-means on the embedding).
    self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]),
                                      ctx=self.xpu)
    self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu)
                           for k, v in self.args.items()})
    # Bias terms get a 2x learning-rate multiplier.
    self.args_mult.update({k: 2.0 if k.endswith('bias') else 1.0
                           for k in self.args})
    self.num_centers = num_centers
def setup(self, X, num_centers, alpha, znum, save_to='dec_model'):
    # Read a previously trained SAE (stacked autoencoder).
    ae_model = AutoEncoderModel(self.xpu, [X.shape[1], 500, 500, 2000, znum],
                                pt_dropout=0.2)
    ae_path = os.path.join(
        save_to,
        'SAE_zsize{}_wimgfeatures_descStats_zeromean.arg'.format(str(znum)))  # _Nbatch_wimgfeatures
    ae_model.load(ae_path)
    logging.log(logging.INFO, "Reading Autoencoder from file..: %s" % ae_path)
    self.ae_model = ae_model
    logging.log(logging.INFO, "finished reading Autoencoder from file..: ")

    self.dec_op = DECModel.DECLoss(num_centers, alpha)
    label = mx.sym.Variable('label')
    self.feature = self.ae_model.encoder
    self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
    self.args.update({k: v for k, v in self.ae_model.args.items()
                      if k in self.ae_model.encoder.list_arguments()})
    self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]),
                                      ctx=self.xpu)
    self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu)
                           for k, v in self.args.items()})
    self.args_mult.update({k: 2.0 if k.endswith('bias') else 1.0
                           for k in self.args})
    self.num_centers = num_centers
    self.best_args = {}
    self.best_args['num_centers'] = num_centers
    self.best_args['znum'] = znum
                            decay=0.0,
                            lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.7),
                            print_every=print_every)
ae_model.finetune(train_X, batch_size, finetune_num_iter, 'sgd', l_rate=0.1,
                  decay=0.0,
                  lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1),
                  print_every=print_every)
ae_model.save('autoencoder.arg')
ae_model.load('autoencoder.arg')
print("Training error:", ae_model.eval(train_X))
print("Validation error:", ae_model.eval(val_X))
if visualize:
    try:
        from matplotlib import pyplot as plt
        from model import extract_feature
        # sample a random image
        # index = np.random.choice(len(X))
        index = 0
        original_image = X[index]
        # print(json.dumps(original_image))
        data_iter = mx.io.NDArrayIter({'data': [original_image]},
                                      batch_size=1,
save_to = r'Z:\Cristina\Section3\paper_notes_section3_MODIFIED\save_to\SAEmodels'
input_size = combX_allNME.shape[1]
latent_size = [input_size // rxf for rxf in [15, 10, 5, 2]]

############ BEST PERFORMING of Step 1: znum = 261
znum = 261
X = combX_allNME
y = roi_labels

xpu = mx.cpu()
ae_model = AutoEncoderModel(xpu, [X.shape[1], 500, 500, 2000, znum],
                            pt_dropout=0.2)
print('Loading autoencoder of znum = {}, post training'.format(znum))
ae_model.load(
    os.path.join(save_to,
                 'SAE_zsize{}_wimgfeatures_descStats_zeromean.arg'.format(str(znum))))

data_iter = mx.io.NDArrayIter({'data': X}, batch_size=X.shape[0],
                              shuffle=False, last_batch_handle='pad')
# extract only the encoder part of the SAE
feature = ae_model.encoder
zspace = list(model.extract_feature(feature, ae_model.args, None, data_iter,
                                    X.shape[0], xpu).values())[0]
# pool Z-space variables
datalabels = np.asarray(y)
dataZspace = zspace
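# Hedged follow-on sketch, not in the original script: with the pooled
# z-space in hand, a quick unsupervised sanity check is to run k-means on
# it. scikit-learn and the cluster count (5) are assumptions made here for
# illustration only; nothing in the script above fixes them.
from sklearn.cluster import KMeans

num_centers = 5  # assumed, purely illustrative
kmeans = KMeans(n_clusters=num_centers, n_init=20, random_state=0)
cluster_ids = kmeans.fit_predict(dataZspace)
print('cluster sizes: {}'.format(np.bincount(cluster_ids)))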
# pylint: skip-file
import mxnet as mx
import numpy as np
import logging
import data
from autoencoder import AutoEncoderModel

if __name__ == '__main__':
    # set to INFO to see less information during training
    logging.basicConfig(level=logging.DEBUG)
    ae_model = AutoEncoderModel(mx.gpu(0), [784, 500, 500, 2000, 10],
                                pt_dropout=0.2, internal_act='relu',
                                output_act='relu')

    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1,
                                decay=0.0,
                                lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
    ae_model.finetune(train_X, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
                      lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
    ae_model.save('mnist_pt.arg')
    ae_model.load('mnist_pt.arg')
    print("Training error:", ae_model.eval(train_X))
    print("Validation error:", ae_model.eval(val_X))
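# Hedged follow-on sketch, not part of the original example: the trained
# encoder can embed data into the 10-d latent space via the repo's
# model.extract_feature helper (the same helper the visualization and
# z-space scripts in this collection call); taking the first value of the
# returned dict mirrors those scripts and is an assumption here.
from model import extract_feature

data_iter = mx.io.NDArrayIter({'data': val_X}, batch_size=256,
                              shuffle=False, last_batch_handle='pad')
embedding = list(extract_feature(ae_model.encoder, ae_model.args, None,
                                 data_iter, val_X.shape[0],
                                 mx.gpu(0)).values())[0]
print("Validation embedding shape:", embedding.shape)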
import mxnet as mx
import numpy as np
import logging
import mnist_data as data
from math import sqrt
from autoencoder import AutoEncoderModel

if __name__ == '__main__':
    lv = 1e-2  # lv/ln in CDL
    # set to INFO to see less information during training
    logging.basicConfig(level=logging.DEBUG)
    # ae_model = AutoEncoderModel(mx.gpu(0), [784, 500, 500, 2000, 10],
    #                             pt_dropout=0.2, internal_act='relu',
    #                             output_act='relu')
    ae_model = AutoEncoderModel(mx.cpu(2), [784, 500, 500, 2000, 10],
                                pt_dropout=0.2, internal_act='relu',
                                output_act='relu')

    X, _ = data.get_mnist()
    train_X = X[:60000]
    val_X = X[60000:]

    # ae_model.layerwise_pretrain(train_X, 256, 50000, 'sgd', l_rate=0.1,
    #                             decay=0.0,
    #                             lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
    # V = np.zeros((train_X.shape[0], 10))
    V = np.random.rand(train_X.shape[0], 10) / 10
    lambda_v_rt = np.ones((train_X.shape[0], 10)) * sqrt(lv)
    ae_model.finetune(train_X, V, lambda_v_rt, 256, 20, 'sgd', l_rate=0.1,
                      decay=0.0,
                      lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
    ae_model.save('mnist_pt.arg')
    ae_model.load('mnist_pt.arg')
    print("Training error:", ae_model.eval(train_X, V, lambda_v_rt))
    # print("Validation error:", ae_model.eval(val_X))
import mxnet as mx
import sys
sys.path.append("../")
sys.path.append("../../autoencoder")
import logging
import numpy as np
from autoencoder import AutoEncoderModel
# from visualize import visualize
from data_all import news_iterator

# ae_model = AutoEncoderModel(mx.gpu(0), [5000, 100], pt_dropout=0.5)
ae_model = AutoEncoderModel(mx.gpu(3), [5000, 100], internal_act='sigmoid',
                            output_act='sigmoid', sparseness_penalty=1e-4,
                            pt_dropout=0)
logging.basicConfig(level=logging.DEBUG)
# ae_model.load('../../autoencoder/news_20classes_small.arg')  # news_20_ltest.arg
ae_model.load('news_20classes_small_1e-4_non-neg.arg')

batch_size = 100
fea_sym = ae_model.loss.get_internals()  # [3]
logging.info(fea_sym.list_outputs())
output = fea_sym['sparse_encoder_0_output']
# Stack a 20-way softmax classifier on top of the sparse encoder output.
fc3 = mx.symbol.FullyConnected(data=output, num_hidden=20)
softmax = mx.symbol.SoftmaxOutput(data=fc3, name='softmax')
# logging.info(softmax.list_arguments())
args = ae_model.args
datashape = (100, 5000)
train, val, _ = news_iterator(input_size=5000, batchsize=100)
# fc = softmax.get_internals()
# logging.info(fc.list_arguments())
args_shape, ow, aw = softmax.get_internals().infer_shape(data=datashape)
# logging.info(args_shape)
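# Hedged sketch, not in the original script: one plausible way to train the
# classifier head above with the legacy mx.model.FeedForward API,
# warm-starting from the autoencoder weights in `args`. The hyperparameters
# (num_epoch, learning_rate) are illustrative assumptions.
clf = mx.model.FeedForward(symbol=softmax, ctx=mx.gpu(3), num_epoch=10,
                           learning_rate=0.1, arg_params=args,
                           allow_extra_params=True)
clf.fit(X=train, eval_data=val)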
def setup(self, X, num_centers, alpha, znum, save_to='dec_model'):
    self.sep = int(X.shape[0] * 0.75)
    X_train = X[:self.sep]
    X_val = X[self.sep:]
    batch_size = 32  # 160 32*5 = update_interval*5
    # Train or read the autoencoder: it depends only on the latent size
    # znum, not on the number of clusters.
    ae_model = AutoEncoderModel(self.xpu, [X.shape[1], 500, 500, 2000, znum],
                                pt_dropout=0.2)
    if not os.path.exists(save_to + '_pt.arg'):
        ae_model.layerwise_pretrain(X_train, batch_size, 50000, 'sgd',
                                    l_rate=0.1, decay=0.0,
                                    lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
        ae_model.finetune(X_train, batch_size, 100000, 'sgd', l_rate=0.1,
                          decay=0.0,
                          lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
        ae_model.save(save_to + '_pt.arg')
        logging.log(logging.INFO,
                    "Autoencoder Training error: %f" % ae_model.eval(X_train))
        logging.log(logging.INFO,
                    "Autoencoder Validation error: %f" % ae_model.eval(X_val))
    else:
        ae_model.load(save_to + '_pt.arg')
        logging.log(logging.INFO,
                    "Reading Autoencoder from file..: %s" % (save_to + '_pt.arg'))
        logging.log(logging.INFO,
                    "Autoencoder Training error: %f" % ae_model.eval(X_train))
        logging.log(logging.INFO,
                    "Autoencoder Validation error: %f" % ae_model.eval(X_val))
    self.ae_model = ae_model
    logging.log(logging.INFO, "finished reading Autoencoder from file..: ")

    # prep model for clustering
    self.dec_op = DECModel.DECLoss(num_centers, alpha)
    label = mx.sym.Variable('label')
    self.feature = self.ae_model.encoder
    self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
    self.args.update({k: v for k, v in self.ae_model.args.items()
                      if k in self.ae_model.encoder.list_arguments()})
    self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]),
                                      ctx=self.xpu)
    self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu)
                           for k, v in self.args.items()})
    self.args_mult.update({k: 2.0 if k.endswith('bias') else 1.0
                           for k in self.args})
    self.num_centers = num_centers
    self.znum = znum
    self.batch_size = batch_size
    # Ratio of training to validation reconstruction error (generalization gauge).
    self.G = self.ae_model.eval(X_train) / self.ae_model.eval(X_val)
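# Hedged sketch, not shown in the original file: DEC typically initializes
# the cluster centers `dec_mu` with k-means centroids computed on the
# encoder output. Assuming scikit-learn and the repo's model.extract_feature
# helper (used by the other scripts in this collection), the initialization
# could look roughly like this hypothetical method.
def init_dec_mu(self, X):
    from sklearn.cluster import KMeans
    import model  # the repo helper module used by the other scripts here
    data_iter = mx.io.NDArrayIter({'data': X}, batch_size=self.batch_size,
                                  shuffle=False, last_batch_handle='pad')
    z = list(model.extract_feature(self.feature, self.args, None, data_iter,
                                   X.shape[0], self.xpu).values())[0]
    kmeans = KMeans(n_clusters=self.num_centers, n_init=20)
    kmeans.fit(z)
    self.args['dec_mu'][:] = kmeans.cluster_centers_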
dev = mx.gpu()
# train, val, voc = news_iterator(voc_count, 100)
vocfile = "/home/tingyubi/20w/data/tfidf_extraction-1_26.voc"
with open(vocfile, 'r', encoding='utf-8') as f:
    voc = f.read().split(" ")
"""
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(name='encoder_%d' % istack, data=data, num_hidden=20)
sig1 = mx.symbol.Activation(data=fc1, act_type='sigmoid')
sparse1 = mx.symbol.SparseReg(data=sig1, penalty=1e-3, sparseness_target=0.1)
fc2 = mx.symbol.FullyConnected(name='decoder_%d' % istack, data=sparse1, num_hidden=5000)
loss = mx.symbol.LinearRegressionOutput(data=fc2, name='softmax')
"""
aem = AutoEncoderModel(mx.gpu(3), [voc_count, 1000, 200],
                       internal_act='sigmoid', output_act='sigmoid',
                       sparseness_penalty=1e-4, pt_dropout=0)
aem.load('/home/tingyubi/20w/autoencoder/20w_1000_200_1e-4_non-neg.arg')
# print(aem.loss.get_internals().list_outputs())
fc2 = aem.encoder
print(aem.loss.get_internals().list_outputs())
# print(aem.loss.get_internals().list_arguments())
model = aem
# print(model.arg_params['encoder_0_weight'].shape)
"""
for k in range(2):
    words_index = np.argsort(model.args['encoder_0_weight'].asnumpy()[k, :])
    logging.info('Topic %d' % k)
    logging.info([voc[i] for i in words_index[-20:]])
"""
for k in range(200):