Example #1
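import numpy as np
import matplotlib.pyplot as plt
from tfrbm import GBRBM
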
def Test_rbm(learning_rate=0.1, k=1, training_epochs=1000):
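    """Train a GBRBM on a small toy binary dataset and print the reconstructions of two test vectors."""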
    data = np.array([[1, 1, 1, 0, 0, 0],
                     [1, 0, 1, 0, 0, 0],
                     [1, 1, 1, 0, 0, 0],
                     [0, 0, 1, 1, 1, 0],
                     [0, 0, 1, 0, 1, 0],
                     [0, 0, 1, 1, 1, 0]])

    rng = np.random.RandomState(123)

    # construct RBM
    # rbm = GBRBM(input=data, n_visible=6, n_hidden=3, momentum=0.95, use_tqdm=True, sample_visible=True)
    gbrbm = GBRBM(n_visible=6,
                  n_hidden=3,
                  learning_rate=0.01,
                  momentum=0.95,
                  use_tqdm=True,
                  sample_visible=True)
    errs = gbrbm.fit(data, n_epoches=1000, batch_size=10)
    plt.plot(errs)

    # test
    v = np.array([
        [1, 1, 1, 0, 0, 0],
        [1, 0, 1, 0, 0, 0],
    ])
    res = gbrbm.reconstruct(v)

    res = np.array(res)
    print(res)
Example #2
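import matplotlib.pyplot as plt
from tfrbm import GBRBM
import input_data  # local MNIST loader module (see Example #14)
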
def Test_gb_rbm():
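    """Train a GBRBM on MNIST, plot the reconstruction error, and show a digit next to its reconstruction."""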
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
    mnist_images = mnist.train.images

    gbrbm = GBRBM(n_visible=784,
                  n_hidden=64,
                  learning_rate=0.01,
                  momentum=0.95,
                  use_tqdm=True,
                  sample_visible=True)
    errs = gbrbm.fit(mnist_images, n_epoches=30, batch_size=10)
    plt.plot(errs)
    plt.show()

    # inspect some of the reconstructed data:
    def show_digit(x):
        plt.imshow(x.reshape((28, 28)), cmap=plt.cm.gray)
        plt.show()

    IMAGE = 1
    image = mnist_images[IMAGE]
    image_rec = gbrbm.reconstruct(image.reshape(1, -1))

    show_digit(image)
    show_digit(image_rec)
Example #3
	def encoder_gbrbm(self, n_hidden=1000, lr=0.01, n_epoches=10, batch_size=100):
		"""
		"""

		n_visible        = len(self.dictionary)
		training_dataset = corpus2dense(self.training_corpus, num_terms=n_visible).transpose()
		self.rbm = GBRBM(n_visible, n_hidden=n_hidden, learning_rate=lr, momentum=0.95, \
			             err_function='mse', use_tqdm=False, sample_visible=False, sigma=1)
		self.rbm.fit(training_dataset, n_epoches=n_epoches, batch_size=batch_size, \
			         shuffle=True, verbose=True)
		self.corpus_rbm = self.rbm.transform(training_dataset)
Example #4

def make_GBRBM_kfold(train_data, valid_data, Image_type, k, save_path):
    # K-fold training of a Gaussian-Bernoulli restricted Boltzmann machine
    # train the model
    gbrbm = GBRBM(n_visible=train_data.shape[1], n_hidden=1000, learning_rate=0.01, momentum=0.9, use_tqdm=True)
    train_data, valid_data, data_mean = data_demean(train_data,valid_data)
    errs, errs_val = gbrbm.fit(train_data, valid_data, n_epoches=640, batch_size=20)
    foldName = str(k)+'_fold/'+Image_type+'/'
    createFolder(save_path+foldName)
    # save each error curve before calling plt.show(), otherwise empty figures are written
    plt.plot(errs)
    plt.savefig(save_path+foldName+'train.png')
    plt.show()
    plt.plot(errs_val)
    plt.savefig(save_path+foldName+'val.png')
    plt.show()
    np.save(save_path+foldName+'data_mean.npy', data_mean)
    gbrbm.save_weights(filename=save_path+foldName, name=Image_type+'_model')
Example #5
def dbn(rbm_hidden_num,
        rbm_visible_size,
        rbm_hidden_size,
        rbm_x,
        rbm_type='GBRBM'):
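    """Greedy layer-wise DBN pretraining: the first layer is a Gaussian-Bernoulli RBM
    (when rbm_type == 'GBRBM'), the remaining layers are Bernoulli-Bernoulli RBMs.
    Returns the stacked weight matrices and hidden biases."""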
    weights = []
    biases = []
    for i in range(rbm_hidden_num):
        # train the RBM
        if i == 0 and rbm_type == 'GBRBM':
            rbm = GBRBM(n_visible=rbm_visible_size,
                        n_hidden=rbm_hidden_size,
                        learning_rate=0.01,
                        momentum=0.95,
                        use_tqdm=False)
        else:
            rbm = BBRBM(n_visible=rbm_visible_size,
                        n_hidden=rbm_hidden_size,
                        learning_rate=0.01,
                        momentum=0.95,
                        use_tqdm=False)
        errs = rbm.fit(rbm_x, n_epoches=10, batch_size=100, verbose=True)
        rbm_x = rbm.transform(rbm_x)
        rbm_w, vb, rbm_b = rbm.get_weights()
        rbm_visible_size = rbm_hidden_size
        weights.append(rbm_w)
        biases.append(rbm_b)
    return weights, biases
Example #6
def make_GBRBM(Image_type, image_type):
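    """Train a GBRBM on TBSS image data loaded from a .mat file, plot the training and validation error curves, and return the trained model."""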
    # path and name of the data
    data_path = '../data/'
    data_name = 'TBSS_' + Image_type + '_Rawimage_249.mat'
    X_data = loadmat(data_path + data_name)
    X_data = X_data[Image_type + '_image'].astype(np.float32)

    # normalize the data
    if Image_type != 'FA':
        X_s_max = X_data.max(axis=0)
        X_data /= X_s_max

    X_data = data_normalization(X_data)
    # split into training and validation sets
    n_train = 229
    n_val = 20
    X_train = X_data[:n_train]
    X_val = X_data[-n_val:]

    # train the model
    gbrbm = GBRBM(n_visible=X_data.shape[1],
                  n_hidden=1000,
                  learning_rate=0.001,
                  momentum=0.9,
                  use_tqdm=True)
    errs, errs_val = gbrbm.fit(X_train, X_val, n_epoches=2500, batch_size=10)

    # save the model
    save_path = '../models/GBRBM/rbm_' + image_type + '/'
    save_name = image_type + '_model'
    # plot the results
    plt.plot(errs)
    plt.savefig(save_path + save_name + '_train.png')
    plt.show()
    plt.plot(errs_val)
    plt.savefig(save_path + save_name + '_val.png')
    plt.show()
    #    gbrbm.save_weights(filename=save_path, name=save_name)

    return gbrbm
Example #7
def DBN_DNN(inp, nClasses, depth, width, batch_size=2048):
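    """Pretrain a stack of RBMs (a GBRBM first, then Bernoulli-Bernoulli RBMs) on the
    input features and use the learned weights to initialize the hidden layers of an
    MLP classifier built by mlp1()."""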
    RBMs = []
    weights = []
    bias = []
    # batch_size = inp.shape
    nEpoches = 5
    if len(inp.shape) == 3:
        inp = inp.reshape((inp.shape[0] * inp.shape[1], inp.shape[2]))
    sigma = np.std(inp)
    # sigma = 1
    rbm = GBRBM(n_visible=inp.shape[-1],
                n_hidden=width,
                learning_rate=0.002,
                momentum=0.90,
                use_tqdm=True,
                sample_visible=True,
                sigma=sigma)
    rbm.fit(inp, n_epoches=15, batch_size=batch_size, shuffle=True)
    RBMs.append(rbm)
    for i in range(depth - 1):
        print('training DBN layer', i)
        rbm = BBRBM(n_visible=width,
                    n_hidden=width,
                    learning_rate=0.02,
                    momentum=0.90,
                    use_tqdm=True)
        for e in range(nEpoches):
            # enlarge the batch size as training progresses; keep it an integer
            batch_size = int(batch_size * (1 + e * 0.5))
            n_batches = (inp.shape[-2] // batch_size) + (
                1 if inp.shape[-2] % batch_size != 0 else 0)
            for j in range(n_batches):
                stdout.write("\r%d batch no %d/%d epoch no %d/%d" %
                             (int(time.time()), j + 1, n_batches, e, nEpoches))
                stdout.flush()
                b = np.array(inp[j * batch_size:min((j + 1) *
                                                    batch_size, inp.shape[0])])
                for r in RBMs:
                    b = r.transform(b)
                rbm.partial_fit(b)
        RBMs.append(rbm)
    for r in RBMs:
        (W, _, Bh) = r.get_weights()
        weights.append(W)
        bias.append(Bh)
    # mlp1 and x_train are assumed to be defined elsewhere in the original module
    model = mlp1(x_train.shape[1], nClasses, depth - 1, width)
    print(len(weights), len(model.layers))
    assert len(weights) == len(model.layers) - 1
    for i in range(len(weights)):
        W = [weights[i], bias[i]]
        model.layers[i].set_weights(W)
    return model
Example #8

def make_GBRBMtransform_kfold(train_data, valid_data, Image_type, k, save_path):
    # Transform the data with the first-layer Gaussian-Bernoulli RBM (k-fold version)
    gbrbm = GBRBM(n_visible=train_data.shape[1], n_hidden=1000, learning_rate=0.01, momentum=0.9, use_tqdm=True)
    train_data, valid_data, data_mean = data_demean(train_data,valid_data)
    foldName = str(k)+'_fold/'+Image_type+'/'
    gbrbm.load_weights(filename=save_path+foldName, name=Image_type+'_model')
    # transform the training and validation datasets
    transform_data_train = np.zeros([train_data.shape[0], 1000]).astype(np.float32)
    for i in range(0, train_data.shape[0]):
        transform_data_train[i, :] = gbrbm.transform(train_data[i, :].reshape(1, -1))

    transform_data_val = gbrbm.transform(valid_data)
    return transform_data_train, transform_data_val
Example #9
def make_transform(Image_type, image_type):
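    """Load pretrained GBRBM weights and transform the training and validation data into the 1000-dimensional hidden representation."""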
    # transform the data into a lower-dimensional representation using the RBM
    data_path = '../data/'
    data_name = 'TBSS_' + Image_type + '_Rawimage_249.mat'
    X_data = loadmat(data_path + data_name)
    X_data = X_data[Image_type + '_image'].astype(np.float32)

    # normalize the data
    if Image_type != 'FA':
        X_s_max = X_data.max(axis=0)
        X_data /= X_s_max

    X_data = data_normalization(X_data)
    # split into training and validation sets
    n_train = 229
    n_val = 20
    X_train = X_data[:n_train]
    X_val = X_data[-n_val:]
    # save the model
    save_path = '../models/GBRBM/rbm_' + image_type + '/'
    save_name = image_type + '_model'
    # build the model (the pretrained weights are loaded below rather than retrained)
    gbrbm = GBRBM(n_visible=X_data.shape[1],
                  n_hidden=1000,
                  learning_rate=0.01,
                  momentum=0.9,
                  use_tqdm=True)
    # loading weights from the path
    gbrbm.load_weights(filename=save_path, name=save_name)
    # transform training and validation dataset
    transform_data_train = np.zeros([n_train, 1000]).astype(np.float32)

    for i in range(0, n_train):
        transform_data_train[i, :] = gbrbm.transform(X_train[i, :].reshape(
            1, -1))

    transform_data_val = gbrbm.transform(X_val)
    return transform_data_train, transform_data_val
Example #10
#

# In[60]:

from tfrbm import BBRBM, GBRBM
# red_data is assumed to be defined in an earlier notebook cell
print(red_data.shape)

# In[61]:

import numpy as np
import matplotlib.pyplot as plt
from tfrbm import GBRBM

gbrbm = GBRBM(n_visible=25,
              n_hidden=9,
              learning_rate=0.01,
              momentum=0.95,
              use_tqdm=True)
errs = gbrbm.fit(red_data, n_epoches=300, batch_size=10)
plt.plot(errs)
plt.show()

# In[24]:

# errs = gbrbm.fit(red_data, n_epoches=300, batch_size=20)
# plt.plot(errs)
# plt.show()

# In[62]:

rbm_hidden = gbrbm.transform(red_data)
Example #11
import pickle
import numpy as np
from tfrbm import GBRBM
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix

# Load training and testing patches, extracted from images and flattened using extract_patches.py
f = open('data_all_normalised.pickle', 'rb')
data = pickle.load(f)
train_array = np.array(data['train_x'])
test_array = np.array(data['test_x'])
y_train = np.array(data['train_y'])
y_test = np.array(data['test_y'])

# Train the restricted Boltzmann machine
rbm = GBRBM(11 * 11,
            60,
            learning_rate=0.001,
            momentum=0,
            err_function='mse',
            use_tqdm=True,
            sample_visible=False,
            sigma=1)
errs = rbm.fit(train_array,
               n_epoches=100,
               batch_size=32,
               shuffle=True,
               verbose=True)
# Extract features using the trained RBM
features_train = rbm.transform(train_array)
features_test = rbm.transform(test_array)

# Train the random forest classifier on the extracted features.
clf = RandomForestClassifier(n_estimators=200, criterion='entropy')
clf.fit(features_train, y_train)
Example #12
import imageio as io
import tensorflow as tf
import numpy as np
import pickle
import time
import os
from tfrbm import GBRBM
from sklearn.ensemble import RandomForestClassifier

patch_size = 15
# 'models' (the directory holding the pickled model) is assumed to be defined elsewhere
f = open(models + 'model_15.pickle', 'rb')
model = pickle.load(f)
rbm = GBRBM(patch_size * patch_size,
            60,
            learning_rate=0.001,
            momentum=0,
            err_function='mse',
            use_tqdm=True,
            sample_visible=False,
            sigma=1)
rbm.set_weights(model['rbm'][0], model['rbm'][1], model['rbm'][2])
rf = model['RF']


def predict(data):
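    """Transform the input patches with the pretrained RBM and classify them with the random forest."""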
    features = rbm.transform(data)
    return rf.predict(features)


def image_segment(img, patch_size):
    mat = np.array(img)
    img = np.array(img)
Example #13
        train_X = np.reshape(train_x, (-1, time_step * input_size))
        train_Y = train_y
        test_X = np.reshape(test_x, (-1, time_step * input_size))
        test_Y = test_y
        rbm_x = train_X
        rbm_visible_size = time_step * input_size
        weights = []
        biases = []

        with tf.device('/gpu:%d' % gpu_device):
            for i in range(rbm_hidden_num):
                # train the RBM
                if i == 0 and rbm_type == 'GBRBM':
                    rbm = GBRBM(n_visible=rbm_visible_size,
                                n_hidden=rbm_hidden_size,
                                learning_rate=0.01,
                                momentum=0.95,
                                use_tqdm=False)
                else:
                    rbm = BBRBM(n_visible=rbm_visible_size,
                                n_hidden=rbm_hidden_size,
                                learning_rate=0.01,
                                momentum=0.95,
                                use_tqdm=False)
                errs = rbm.fit(rbm_x,
                               n_epoches=10,
                               batch_size=100,
                               verbose=True)
                rbm_x = rbm.transform(rbm_x)
                rbm_W, vb, rbm_b = rbm.get_weights()
                rbm_visible_size = rbm_hidden_size
Example #14

import numpy as np
#import pylab as pl
import sys
sys.path.append(".")

import matplotlib.pyplot as plt
from tfrbm import BBRBM, GBRBM
#from tensorflow.examples.tutorials.mnist import input_data
import input_data

mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
mnist_images = mnist.train.images

gbrbm = GBRBM(n_visible=784,
              n_hidden=500,
              learning_rate=1.0,
              momentum=0.95,
              use_tqdm=True,
              sample_visible=False)
errs = gbrbm.fit(mnist_images, n_epoches=1, batch_size=100)
plt.plot(errs)
plt.show()


# inspect some of the reconstructed data:
def show_digit(x):
    plt.imshow(x.reshape((28, 28)), cmap=plt.cm.gray)
    plt.show()


IMAGE = 1
image = mnist_images[IMAGE]
Example #15
# prepare the noisy data set
noisy_data_path = "DataSet/train/noisy_speech/"
input_data = prepare_data(file_path=noisy_data_path)

# begin pretraining Gaussian-Bernoulli RBM
gb_n_visible = input_data.shape[1]
gb_n_hid = 2048
gb_learning_rate = 0.01
gb_momentum = 0.95
gb_err_function = 'mse'
sigma = 1

gbrbm = GBRBM(n_visible=gb_n_visible,
              n_hidden=gb_n_hid,
              learning_rate=gb_learning_rate,
              momentum=gb_momentum,
              err_function=gb_err_function,
              use_tqdm=False,
              sample_visible=True,
              sigma=sigma)

gb_n_epoches = 40
gb_batch_size = 128

errs = gbrbm.fit(data_x=input_data,
                 n_epoches=gb_n_epoches,
                 batch_size=gb_batch_size,
                 shuffle=True,
                 verbose=True)

gb_filename = 'pretrain_models/gbrbm.ckpt'
gb_name = 'rbm'
Example #16
layer_num = 1
max_epoch = 20000
dropout_keep_rate = 0.9

x = np.hstack((flow_normalized, speed_normalized, occupancy_normalized))
y = np.hstack((flow_normalized, speed_normalized, occupancy_normalized))
train_x, train_y, test_x, test_y = create_train_test(x, y, time_step,
                                                     train_num, test_num)

for rbm_hidden in [400]:
    with tf.device('/gpu:%d' % gpu_device):
        # train the RBM
        _, _, input_size = train_x.shape
        rbm = GBRBM(n_visible=input_size,
                    n_hidden=rbm_hidden,
                    learning_rate=0.01,
                    momentum=0.95,
                    use_tqdm=True)
        errs = rbm.fit(train_x[:, 0, :],
                       n_epoches=10,
                       batch_size=100,
                       verbose=True)
        rbm_W, vb, rbm_b = rbm.get_weights()

    # file for storing the run results
    # date_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    file_name = 'fso_fso_lstm_gbrbm'
    train_mre_result = []
    test_mre_result = []
    test_mae_result = []
    test_rmse_result = []
Example #17
class Feature(catscorpus.CatsCorpus, utils.Config):
	"""
	
	"""

	def __init__(self, root_path, is_tfidf=False):
		catscorpus.CatsCorpus.__init__(self, root_path=root_path)
		# Select training corpus
		self.is_tfidf = is_tfidf
		if self.is_tfidf:
			self.training_corpus = self.tfidf  # Take the tf-idf matrix as input
		else:
			self.training_corpus = self.corpus # Take the BoW corpus as input

	def encoder_lda(self, num_topics=100, chunksize=500):
		"""
		
		"""

		self.num_topics = num_topics
		# Train LDA based on training dataset
		self.lda = LdaModel(corpus=self.training_corpus, id2word=self.dictionary, \
			                num_topics=num_topics, update_every=1, chunksize=chunksize, passes=1)
		# Convert bow into topic vectors
		self.corpus_lda = self.lda[self.training_corpus]

	def encoder_lsi(self, num_components=100, chunksize=500, is_tfidf=False):
		"""
		
		"""

		self.num_components = num_components
		# Train LSI based on training dataset
		self.lsi = LsiModel(corpus=self.training_corpus, id2word=self.dictionary, \
		                           num_topics=num_components, chunksize=chunksize) # initialize an LSI transformation
		# Convert bow into LSI projections
		self.corpus_lsi = self.lsi[self.training_corpus]

	def encoder_gbrbm(self, n_hidden=1000, lr=0.01, n_epoches=10, batch_size=100):
		"""
		"""

		n_visible        = len(self.dictionary)
		training_dataset = corpus2dense(self.training_corpus, num_terms=n_visible).transpose()
		self.rbm = GBRBM(n_visible, n_hidden=n_hidden, learning_rate=lr, momentum=0.95, \
			             err_function='mse', use_tqdm=False, sample_visible=False, sigma=1)
		self.rbm.fit(training_dataset, n_epoches=n_epoches, batch_size=batch_size, \
			         shuffle=True, verbose=True)
		self.corpus_rbm = self.rbm.transform(training_dataset)

	def save_gbrbm(self, model_path=None, output_path=None):
		"""
		"""
		
		model_path  = "%s/%s" % (model_path, "model")
		output_path = "%s/%s" % (output_path, "npy.mat.txt")

		# if model_path:
			# self.rbm.save(model_path)
		if output_path:
			# numpy_matrix = corpus2dense(self.corpus_lda, num_terms=self.num_topics)
			np.savetxt(output_path, self.corpus_rbm, delimiter=',')


	def save_lda(self, model_path=None, output_path=None):
		"""

		"""

		model_path  = "%s/%s" % (model_path, "model")
		output_path = "%s/%s" % (output_path, "npy.mat.txt")

		if model_path:
			self.lda.save(model_path)
		if output_path:
			numpy_matrix = corpus2dense(self.corpus_lda, num_terms=self.num_topics).transpose()
			np.savetxt(output_path, numpy_matrix, delimiter=',')

	def save_lsi(self, model_path=None, output_path=None):
		"""

		"""

		model_path  = "%s/%s" % (model_path, "model")
		output_path = "%s/%s" % (output_path, "npy.mat.txt")

		if model_path:
			self.lsi.save(model_path)
		if output_path:
			numpy_matrix = corpus2dense(self.corpus_lsi, num_terms=self.num_components).transpose()
			np.savetxt(output_path, numpy_matrix, delimiter=',')

	def random_sampling(self, num_samples):
		catscorpus.CatsCorpus.random_sampling(self, num_samples)
		# Select training corpus
		if self.is_tfidf:
			self.training_corpus = self.tfidf  # Take the tf-idf matrix as input
		else:
			self.training_corpus = self.corpus # Take the BoW corpus as input

	def category_sampling(self, categories):
		catscorpus.CatsCorpus.category_sampling(self, categories)
		# Select training corpus
		if self.is_tfidf:
			self.training_corpus = self.tfidf  # Take the tf-idf matrix as input
		else:
			self.training_corpus = self.corpus # Take the BoW corpus as input


	def __iter__(self):
		pass



		
Example #18
import numpy as np
import matplotlib.pyplot as plt
from tfrbm import BBRBM, GBRBM, GDBM, GGRBM, ReluRBM, TanhRBM, LeakyReluRBM
import tensorflow as tf

confs = np.genfromtxt("h2o_scf.txt")
gbrbm = GBRBM(n_visible=3, n_hidden=12, learning_rate=0.001, sigma=0.7, err_function='mse')
gbrbm_err, _ = gbrbm.fit(confs, n_epoches=20, batch_size=20)

fig1, ax1 = plt.subplots()
ax1.plot(gbrbm_err)
ax1.set_xlabel('Epochs')
ax1.set_ylabel('MSE')
plt.show()

sims = gbrbm.simulate("simulated.out", confs=10000, postprocess=True)