def test_all(self, n):
    # Pretrain a deep belief network on the MNIST training images.
    _dbn = DBN([784, 1000, 500, 250, 30], learning_rate=0.01, cd_k=1)
    _dbn.pretrain(mnist.train.images, 128, 50)
    # Unroll the DBN into a 784-...-30-...-784 reconstruction network and fine-tune it.
    _nnet = NN([784, 1000, 500, 250, 30, 250, 500, 1000, 784], 0.01, 128, 50)
    _nnet.load_from_dbn_to_reconstructNN(_dbn)
    _nnet.train(mnist.train.images, mnist.train.images)
    _nnet.test_linear(mnist.test.images, mnist.test.images)
    # Reconstruct the first 30 test images and save them next to the originals.
    x_in = mnist.test.images[:30]
    _predict = _nnet.predict(x_in)
    _predict_img = np.concatenate(np.reshape(_predict, [-1, 28, 28]), axis=1)
    x_in = np.concatenate(np.reshape(x_in, [-1, 28, 28]), axis=1)
    img = Image.fromarray((1.0 - np.concatenate((_predict_img, x_in), axis=0)) * 255.0)
    img = img.convert('L')
    img.save(str(n) + '_.jpg')
    img2 = Image.fromarray(np.concatenate((_predict_img, x_in), axis=0) * 255.0)
    img2 = img2.convert('L')
    img2.save(str(n) + '.jpg')
    # Split the fine-tuned autoencoder into a separate encoder and decoder network.
    nnet_encoder = NN()
    nnet_encoder.load_layers_from_NN(_nnet, 0, 4)
    # feature = nnet_encoder.predict(mnist.test.images)
    nnet_decoder = NN()
    nnet_decoder.load_layers_from_NN(_nnet, 5, 8)
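
# Sketch (not part of the original file): how the encoder/decoder split built at the
# end of test_all could be used to map images to 30-dimensional codes and back.
# Assumes NN.predict accepts and returns numpy arrays as it does in test_all above;
# the function name is hypothetical.
def split_and_encode(_nnet, images):
    # Layers 0-4 form the 784 -> 30 encoder, layers 5-8 the 30 -> 784 decoder,
    # mirroring the [784, 1000, 500, 250, 30, 250, 500, 1000, 784] layout.
    encoder = NN()
    encoder.load_layers_from_NN(_nnet, 0, 4)
    decoder = NN()
    decoder.load_layers_from_NN(_nnet, 5, 8)
    codes = encoder.predict(images)           # 30-dimensional codes
    reconstructions = decoder.predict(codes)  # back to 784-pixel images
    return codes, reconstructions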
def test_another_rbmtrain(self, n):
    # Pretrain the DBN on MNIST in chunks of 5500 images, cycling over the data 5 times.
    _dbn = DBN([784, 1000, 500, 250, 30], learning_rate=0.01, cd_k=1)
    print(len(mnist.train.images))
    for j in range(5):
        for i in range(10):
            _dbn.pretrain(mnist.train.images[i * 5500:i * 5500 + 5500], 128, 5)
    # Unroll into a reconstruction network and fine-tune, as in test_all.
    _nnet = NN([784, 1000, 500, 250, 30, 250, 500, 1000, 784], 0.01, 128, 50)
    _nnet.load_from_dbn_to_reconstructNN(_dbn)
    _nnet.train(mnist.train.images, mnist.train.images)
    _nnet.test_linear(mnist.test.images, mnist.test.images)
    # Reconstruct the first 30 test images and save them next to the originals.
    x_in = mnist.test.images[:30]
    _predict = _nnet.predict(x_in)
    _predict_img = np.concatenate(np.reshape(_predict, [-1, 28, 28]), axis=1)
    x_in = np.concatenate(np.reshape(x_in, [-1, 28, 28]), axis=1)
    img = Image.fromarray((1.0 - np.concatenate((_predict_img, x_in), axis=0)) * 255.0)
    img = img.convert('L')
    img.save(str(n) + '_.jpg')
    img2 = Image.fromarray(np.concatenate((_predict_img, x_in), axis=0) * 255.0)
    img2 = img2.convert('L')
    img2.save(str(n) + '.jpg')
def train_classifier(X, y):
    """
    Trains a classifier using the best known parameters on the given data / labels.

    :param X: Samples, a numpy array of (N, n_vis) shape where N is the number of
        samples and n_vis the number of visible variables (sample dimensionality).
    :param y: Labels, a numpy array of (N, 1) shape. Each label should be a label index.
    """
    # Split the data into minibatches.
    X_mnb, y_mnb = util.create_minibatches(X, y, __CLASS_COUNT * 20)

    # Create a DBN and pretrain it layer by layer.
    dbn = DBN([32 * 24, 600, 600], __CLASS_COUNT)
    pretrain_params = [[80, 0.05, True, 1, 0.085, 0.1],
                       [80, 0.05, True, 1, 0.000, 0.0]]
    dbn.pretrain(X_mnb, y_mnb, pretrain_params)

    # Fine-tune the unrolled network as an MLP.
    mlp = dbn.to_mlp()
    mlp.train(X_mnb, y_mnb, 1000, 0.1)

    return mlp
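
# Sketch (not part of the original file): calling train_classifier on dummy data of the
# shapes its docstring describes. The 32 * 24 = 768 visible variables and __CLASS_COUNT
# classes follow the DBN layout above; the random data and the assumption that numpy is
# imported as np are purely illustrative.
def _train_classifier_example():
    N = __CLASS_COUNT * 20 * 5                          # a few full minibatches
    X = np.random.rand(N, 32 * 24).astype(np.float32)   # (N, n_vis) samples
    y = np.random.randint(0, __CLASS_COUNT, size=(N, 1))  # (N, 1) label indices
    mlp = train_classifier(X, y)
    return mlp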
def _perform(self):
    p = self.params
    class_count = p[0]
    layer_sizes = p[1]
    # Make a copy of the rbm-param list because we might
    # modify it for training, and we don't want to
    # affect the original.
    rbm_params = list(p[2])

    dbn = DBN(layer_sizes, class_count)

    # Train the first RBM as an RbmJob (to be able to reuse it),
    # assuming the DBN is more than one layer deep.
    if len(layer_sizes) >= 2:
        first_rbm_params = [class_count, layer_sizes[0], layer_sizes[1]]
        first_rbm_params.extend(rbm_params[0])
        first_rbm_job = RbmJob(first_rbm_params, self.X_train)
        if not first_rbm_job.is_done():
            first_rbm_job.perform()

        # Replace the DBN's first RBM with the one already trained.
        dbn.rbms[0] = first_rbm_job.results()[0]
        # Make sure it's skipped in DBN training.
        rbm_params[0] = None

    train_res = dbn.pretrain(self.X_train, self.y_train, rbm_params)

    return (dbn, train_res)
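
# Sketch (not part of the original file): one plausible layout for self.params, based on
# the [class_count, layer_sizes, per-RBM params] unpacking above and the per-RBM parameter
# lists used in train_classifier. The concrete values here are illustrative only.
_example_job_params = [
    9,                                    # class_count (hypothetical value)
    [32 * 24, 600, 600],                  # layer_sizes
    [[80, 0.05, True, 1, 0.085, 0.1],     # per-RBM training params, one list per RBM
     [80, 0.05, True, 1, 0.000, 0.0]],
]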