Example #1

import os
import sys
import cPickle
# `RBM` and `train` are this project's own helpers; their import lines are not
# part of this excerpt.

if __name__ == '__main__':
    if len(sys.argv) < 3 or len(sys.argv) > 4:
        print 'usage: python train_auto_rbm.py pcd/cd cd-k [output_dir]'
        sys.exit()
    else:
        use_pcd = (sys.argv[1] == 'pcd')
        cd_k = int(sys.argv[2])
        output_dir = None if len(sys.argv) == 3 else sys.argv[3]

    decoder_dir = 'noise_02_deep_model1'
    dataset = os.path.join(decoder_dir, 'encoded_cifar10.pkl')
    train_xs = cPickle.load(open(dataset, 'rb'))
    num_imgs = train_xs.shape[0]
    train_xs = train_xs.reshape(num_imgs, -1)

    print train_xs.shape
    batch_size = 100
    lr = 0.001 if use_pcd else 0.1

    rbm = RBM(train_xs[0].size, 1000, output_dir)

    train(rbm,
          train_xs,
          lr,
          500,
          batch_size,
          use_pcd,
          cd_k,
          output_dir,
          decoder_dir=decoder_dir)
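For orientation: the `pcd/cd` argument picks persistent contrastive divergence versus plain CD, and `cd-k` is the number of Gibbs steps per update. As a minimal, self-contained numpy sketch of what a single CD-1 update does (illustrative only, not this repository's `RBM` class; it uses mean-field probabilities instead of sampled states):

import numpy as np

def cd1_step(W, b_v, b_h, v0, lr=0.1):
    # One CD-1 update on a mini-batch v0 of shape (batch, num_visible).
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    h0 = sigmoid(v0 @ W + b_h)    # positive phase: p(h | v0)
    v1 = sigmoid(h0 @ W.T + b_v)  # reconstruction: p(v | h0)
    h1 = sigmoid(v1 @ W + b_h)    # negative phase: p(h | v1)
    W += lr * (v0.T @ h0 - v1.T @ h1) / len(v0)
    b_v += lr * (v0 - v1).mean(axis=0)
    b_h += lr * (h0 - h1).mean(axis=0)
    return W, b_v, b_h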
Example #2

import numpy as np
import tensorflow as tf
# `bsize`, `num_hidden`, `num_visible`, `num_state_vis`, `num_state_hid`,
# `num_samples`, `learning_rate_start`, `RBM`, and `Placeholders` are defined
# earlier in the original script; this excerpt starts below them.

class Ops(object):
    pass

weights      = None  # None -> weights are initialized randomly
visible_bias = None  # None -> visible bias is initialized at zero
hidden_bias  = None  # None -> hidden bias is initialized at zero

# Load the MC configuration training data:
trainFileName = 'data/train.txt'
xtrain        = np.loadtxt(trainFileName)
ept           = np.random.permutation(xtrain) # random permutation of training data
iterations_per_epoch = xtrain.shape[0] // bsize  # number of minibatches per epoch

# Initialize the RBM 
rbm = RBM(num_hidden=num_hidden, num_visible=num_visible,
          num_state_vis=num_state_vis, num_state_hid=num_state_hid,
          weights=weights, visible_bias=visible_bias, hidden_bias=hidden_bias,
          num_samples=num_samples)

# Initialize operations and placeholders classes
ops          = Ops()
placeholders = Placeholders()
placeholders.visible_samples = tf.placeholder(tf.float32, shape=(None, num_visible*num_state_vis), name='v') # placeholder for training data

total_iterations = 0 # starts at zero 
ops.global_step  = tf.Variable(total_iterations, name='global_step_count', trainable=False)
learning_rate    = tf.train.exponential_decay(
    learning_rate_start,
    ops.global_step,
    100 * xtrain.shape[0] // bsize,  # decay_steps
    1.0 # decay rate = 1 means no decay
)
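For reference, with `staircase=False` `tf.train.exponential_decay` computes `learning_rate_start * decay_rate ** (global_step / decay_steps)`, so the decay rate of `1.0` above really does keep the learning rate constant. A quick plain-Python check:

def exponential_decay(lr0, global_step, decay_steps, decay_rate):
    # Continuous (staircase=False) form of tf.train.exponential_decay.
    return lr0 * decay_rate ** (global_step / decay_steps)

assert exponential_decay(0.01, 500, 1000, 1.0) == 0.01  # decay_rate=1 -> constant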
 
Example #3
import numpy as np
# `RBM` and `User` come from this project's own modules (not shown in this excerpt).

def recommenderNews(rbm, listNews):
    # Reconstructed head of this truncated helper: project the user's ratings to
    # the hidden layer and back to visibles (assumes the run_visible/run_hidden
    # API of the classic numpy RBM implementation this example uses).
    hidden_layer = rbm.run_visible(listNews)
    return rbm.run_hidden(hidden_layer)
    
def printRecommenders(recommender, news, user):
    print("-----------------------------------------------------")
    for i in range(len(user.listNews[0])):
        if user.listNews[0,i] == 0 and recommender[0,i] == 1:
            print("Nome: %s , Recomendação: %s" %(user.nome,news[i]))



news = ["Sem reforma, déficit das previdências estaduais em 2060 deve ser 4 vezes maior que o de 2013, aponta estudo", "Relator da reforma da Previdência se reúne com Maia e líderes para debater parecer", "Lava Jato: 8 parlamentares esperam STF decidir se viram réus", 
          "Revoltado com punição, Vettel reclama muito e coloca placa de 2º lugar à frente de carro de Hamilton", "Brasil goleia Honduras por 7 a 0 na maior vitória sob o comando de Tite", "Portugal bate Holanda e se sagra campeão da Liga das Nações"]

userTrain = np.array([[1,1,1,0,0,0],
                      [1,0,1,0,0,0],
                      [1,1,1,0,0,0],
                      [0,0,1,1,1,1],
                      [0,0,1,1,0,1],
                      [0,0,1,1,0,1]])

users = []
users.append(User("José", np.array([[1,1,0,1,0,0]])))
users.append(User("Maria", np.array([[0,0,0,1,1,0]])))

rbm = RBM(num_visible=6, num_hidden=2)
rbm.train(userTrain, max_epochs=5000)
print(rbm.weights)  # inspect the learned weights

for user in users:
    recommender = recommenderNews(rbm, user.listNews)
    printRecommenders(recommender, news, user)
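The same recommend-what-the-reconstruction-activates idea can be sketched with scikit-learn's BernoulliRBM as a stand-in for the `RBM` class above (`userTrain` and `news` as defined in this example):

import numpy as np
from sklearn.neural_network import BernoulliRBM

model = BernoulliRBM(n_components=2, learning_rate=0.05, n_iter=2000, random_state=0)
model.fit(userTrain)

jose = np.array([[1, 1, 0, 1, 0, 0]])
reconstruction = model.gibbs(jose)  # one Gibbs step: visible -> hidden -> visible
for i, (seen, rec) in enumerate(zip(jose[0], reconstruction[0])):
    if seen == 0 and rec:  # an unseen item the model switches on
        print(news[i])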
Example #4
import os
import itertools
import numpy as np
import fcntl
import copy
from string import Template
import mlpython.datasets.store as dataset_store
import mlpython.mlproblems.generic as mlpb
from rbm import RBM
#from autoencoder import Autoencoder

print "Loading dataset..."
trainset,validset,testset = dataset_store.get_classification_problem('ocr_letters')
print "Train RBM for 10 iterations... (this might take a few minutes)"
rbm = RBM(n_epochs=10,
          hidden_size=200,
          lr=0.01,
          CDk=1,
          seed=1234)

rbm.train(mlpb.SubsetFieldsProblem(trainset))
rbm.show_filters()

Example #5
def rbm_instance():
    rbmobject1 = RBM(17, 40, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.001)
    rbmobject2 = RBM(40, 4, ['rbmw2', 'rbvb2', 'rbmhb2'], 0.001)
    return rbmobject1, rbmobject2
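A rough sketch of how these two RBMs would be pretrained greedily, assuming the `partial_fit` API shown in Example #14 and a `transform`-style method that returns hidden activations (neither is shown in this excerpt, so treat the method names as assumptions):

import numpy as np

rbm1, rbm2 = rbm_instance()
data = np.random.rand(1000, 17)  # toy stand-in for real 17-feature inputs

for i in range(0, len(data), 10):
    rbm1.partial_fit(data[i:i + 10])                  # train the 17 -> 40 layer first
for i in range(0, len(data), 10):
    rbm2.partial_fit(rbm1.transform(data[i:i + 10]))  # then 40 -> 4 on its codes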
Example #6
    def __init__(self,
                 n_in,
                 n_out=4,
                 hidden_layers_sizes=[2048, 2048, 50, 2048, 2048]):
        assert len(hidden_layers_sizes) > 0
        self.rbm_layers = []
        self.sess = tf.Session()

        self.x = tf.placeholder(tf.float32, shape=None)
        self.y = tf.placeholder(tf.float32, shape=None)

        # Build the DBN
        for i in range(len(hidden_layers_sizes)):
            if i == 0:
                layer_input = self.x
                input_size = n_in
            else:
                input_size = hidden_layers_sizes[i - 1]
            # hidden layer
            bound_val = 4.0 * np.sqrt(6.0 /
                                      (input_size + hidden_layers_sizes[i]))
            W = tf.Variable(tf.random_uniform(
                [input_size, hidden_layers_sizes[i]],
                minval=-bound_val,
                maxval=bound_val),
                            dtype=tf.float32,
                            name="W{}".format(i))
            b = tf.Variable(tf.zeros([
                hidden_layers_sizes[i],
            ]),
                            dtype=tf.float32,
                            name="b{}".format(i))
            #sum_W = tf.matmul(layer_input, W) + b
            sum_W = tf.add(tf.matmul(layer_input, W),
                           b,
                           name="HiddenLayer{}".format(i))
            t_layer_input = tf.nn.sigmoid(sum_W)
            if i > 0 and hidden_layers_sizes[i - 1] > hidden_layers_sizes[i]:
                self.DBF = t_layer_input
            # create the RBM layer
            self.rbm_layers.append(
                RBM(inpt=layer_input,
                    n_visiable=input_size,
                    n_hidden=hidden_layers_sizes[i],
                    W=W,
                    hbias=b))
            if i > 0 and hidden_layers_sizes[i] > hidden_layers_sizes[i - 1]:
                self.DBF = self.rbm_layers[i].input
            layer_input = t_layer_input

        W = tf.Variable(
            tf.zeros([hidden_layers_sizes[-1], n_out], dtype=tf.float32))
        b = tf.Variable(tf.zeros([
            n_out,
        ]), dtype=tf.float32)
        self.output = tf.nn.softmax(tf.matmul(layer_input, W) + b)
        self.y_pred = tf.argmax(self.output, axis=1)
        self.loss = -tf.reduce_mean(
            tf.reduce_sum(self.y * tf.log(self.output),
                          axis=1))  #cross_entropy
        correct_pred = tf.equal(self.y_pred, tf.argmax(self.y, axis=1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
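One caveat: the hand-rolled `tf.log(self.output)` cross-entropy above yields NaNs once a softmax output saturates at zero. A numerically safer TF1 variant keeps the pre-softmax logits and lets TensorFlow fuse the two ops; a sketch against the same `layer_input`, `W`, and `b`:

logits = tf.add(tf.matmul(layer_input, W), b)
self.output = tf.nn.softmax(logits)
self.loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y, logits=logits))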
Example #7
    def __init__(self,
                 numpy_rng,
                 theano_rng=None,
                 n_ins=784,
                 hidden_layers_sizes=[500, 500],
                 n_outs=10,
                 L1_reg=0,
                 L2_reg=0,
                 first_layer='grbm',
                 model=None):
        """This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the DBN

        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.L1 = 0
        self.L2_sqr = 0

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = MRG_RandomStreams(numpy_rng.randint(2**30))

        # allocate symbolic variables for the data
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector
        # of [int] labels
        # end-snippet-1
        # The DBN is an MLP, for which all weights of intermediate
        # layers are shared with a different RBM.  We will first
        # construct the DBN as a deep multilayer perceptron, and when
        # constructing each sigmoidal layer we also construct an RBM
        # that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
        # training the DBN by doing stochastic gradient descent on the
        # MLP.

        for i in xrange(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden
            # units of the layer below or the input size if we are on
            # the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the
            # hidden layer below or the input of the DBN if you are on
            # the first layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[i - 1].output

            if model is None:
                W = None
                b = None
            else:
                W = model[i * 2]
                b = model[i * 2 + 1]

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        W=W,
                                        b=b,
                                        activation=T.nnet.sigmoid)

            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)
            self.L1 += (abs(sigmoid_layer.W).sum())
            self.L2_sqr += ((sigmoid_layer.W**2).sum())

            # it's arguably a philosophical question...  but we are
            # going to only declare that the parameters of the
            # sigmoid_layers are parameters of the DBN. The visible
            # biases in the RBM are parameters of those RBMs, but not
            # of the DBN.
            self.params.extend(sigmoid_layer.params)

            # Construct an RBM that shares weights with this layer
            if i == 0:  # first layer GBRBM - dealing with continuous values
                if first_layer == 'grbm':
                    rbm_layer = GRBM(numpy_rng=numpy_rng,
                                     theano_rng=theano_rng,
                                     input=layer_input,
                                     n_visible=input_size,
                                     n_hidden=hidden_layers_sizes[i],
                                     W=sigmoid_layer.W,
                                     hbias=sigmoid_layer.b)
                elif first_layer == 'rbm':
                    rbm_layer = RBM(numpy_rng=numpy_rng,
                                    theano_rng=theano_rng,
                                    input=layer_input,
                                    n_visible=input_size,
                                    n_hidden=hidden_layers_sizes[i],
                                    W=sigmoid_layer.W,
                                    hbias=sigmoid_layer.b)
            # elif i == self.n_layers-1: # last layer GGRBM
            # rbm_layer = GRBM(numpy_rng=numpy_rng,
            # theano_rng=theano_rng,
            # input=layer_input,
            # n_visible=input_size,
            # n_hidden=hidden_layers_sizes[i],
            # W=sigmoid_layer.W,
            # hbias=sigmoid_layer.b)
            else:  # subsequent layers BBRBM - binary RBM to cope with regularization
                rbm_layer = RBM(numpy_rng=numpy_rng,
                                theano_rng=theano_rng,
                                input=layer_input,
                                n_visible=input_size,
                                n_hidden=hidden_layers_sizes[i],
                                W=sigmoid_layer.W,
                                hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)

        # We now need to add a logistic layer on top of the MLP
        if model is None:
            W = None
            b = None
        else:
            W = model[-2]
            b = model[-1]
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            W=W,
            b=b,
            n_out=n_outs)
        self.params.extend(self.logLayer.params)

        self.L1 += (abs(self.logLayer.W).sum())

        self.L2_sqr += ((self.logLayer.W**2).sum())

        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = (self.logLayer.negative_log_likelihood(self.y) +
                              L1_reg * self.L1 + L2_reg * self.L2_sqr)

        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
        self.predprobs = self.logLayer.p_y_given_x
        self.preds = self.logLayer.y_pred
Example #8
    ae_folder = 'prod/cifar10_ae2_relu_%d' % cifar10_ae.RELU_MAX
    ae = AutoEncoder(Cifar10Wrapper.load_default(), cifar10_ae.encode,
                     cifar10_ae.decode, cifar10_ae.RELU_MAX, ae_folder)
    ae.build_models(ae_folder)  # load model

    encoded_dataset = Cifar10Wrapper.load_from_h5(
        os.path.join(ae_folder, 'encoded_cifar10.h5'))
    assert len(encoded_dataset.x_shape) == 1

    num_hid = 2000
    output_folder = os.path.join(ae_folder, 'test_pretrain')
    # weights_file = os.path.join(
    #     output_folder, 'ptrbm_hid2000_lr0.1_cd1', 'epoch_100_rbm.h5')
    weights_file = '/home/hhu/Developer/dem/prod/cifar10_ae2_relu_6/ptrbm_scheme0/ptrbm_hid2000_lr0.1_cd1/epoch_100_rbm.h5'
    rbm = RBM(None, None, weights_file)
    # rbm = RBM(encoded_dataset.x_shape[0], num_hid, None)

    # train_config = utils.TrainConfig(
    #     lr=0.1, batch_size=100, num_epoch=100, use_pcd=False, cd_k=1)
    train_config = utils.TrainConfig(lr=0.01,
                                     batch_size=100,
                                     num_epoch=200,
                                     use_pcd=True,
                                     cd_k=5)

    pretrain(sess, rbm, encoded_dataset, ae.decoder, train_config,
             utils.vis_cifar10, output_folder)

    # utils.initialize_uninitialized_variables_by_keras()
    # h = sess.run(rbm._compute_up(encoded_dataset.test_xs))
Example #9
training_set[training_set == 1] = 0
training_set[training_set == 2] = 0
training_set[training_set >= 3] = 1

test_set[test_set == 0] = -1  # not rated
test_set[test_set == 1] = 0
test_set[test_set == 2] = 0
test_set[test_set >= 3] = 1

# Number of movies is the number of visible units
config.n_vis = len(training_set[0])
# This tunable parameter is the number of features that we want to detect (number of hidden units)
config.n_hid = 100

# Create the model object RBM()
rbm = RBM(config.n_vis, config.n_hid)

config.batch_size_ = 512  # set batch size to be 512 (tunable)
reconerr = []  # keep track of reconstruction error
config.nb_epoch = 50  # run for 50 epochs

# Train the RBM
# First for loop - go through every single epoch
for epoch in range(1, config.nb_epoch + 1):
    train_recon_error = 0  # RMSE reconstruction error initialized to 0 at the beginning of training
    s = 0.  # a counter (float type)

    # Second for loop - go through every single user
    # Lower bound is 0, upper bound is (nb_users - batch_size_), batch_size_ is the step of each batch (512)
    # The 1st batch is for user with ID = 0 to user with ID = 511
    for id_user in range(0, config.nb_users - config.batch_size_, config.batch_size_):
Example #10

import mxnet as mx
import numpy as np
from mxnet import nd
# `RBM` here is this project's mxnet implementation; `vec_bin_array`, `N`, and
# `d` are defined earlier in the original script.

bits = vec_bin_array(np.arange(N), d).astype(np.float64)
dat = np.zeros((2 * N, d * d))  # assumed shape: each row flattens a (d, d) pattern
for i in range(N):
    mat = bits[i, :].reshape(1, -1).T.dot(np.ones(shape=(1, d)))
    dat[2 * i, :] = mat.flatten()
    dat[2 * i + 1, :] = mat.T.flatten()

ctx = mx.cpu()
dat = nd.array(dat, ctx=ctx)
N = dat.shape[0]
m = d * d
n = m

# Train RBM using CD-k
mx.random.seed(123)
np.random.seed(123)
cd1 = RBM(m, n, ctx=ctx)
res_cd1 = cd1.train_cdk(dat, batch_size=N, epochs=2000, lr=0.1,
                        k=1, nchain=1000,
                        report_freq=1, exact_loglik=True)
# np.savetxt("cd1.txt", np.array(res_cd1))

mx.random.seed(123)
np.random.seed(123)
ucd = RBM(m, n, ctx=ctx)
res_ucd = ucd.train_ucd(dat, batch_size=N, epochs=2000, lr=0.1,
                        min_mcmc=1, max_mcmc=100, nchain=1000,
                        report_freq=1, exact_loglik=True)
# np.savetxt("ucd_loglik.txt", np.array(res_ucd[0]))
# np.savetxt("ucd_tau.txt", np.array(res_ucd[1]))
# np.savetxt("ucd_disc.txt", np.array(res_ucd[2]))
Example #11
    interval_count = int((max_noise - min_noise) / noise_int)
    fileStr = 'Patt_Misclassification_Mean_P' + str(
        text_array_float.shape[0]) + '_RBM_hmin' + str(
            np.min(hidden_nodes_list)) + '_hmax' + str(
                np.max(hidden_nodes_list)) + '_trsz' + str(image_count)
    fileName = dirName + fileStr
    fig = plt.figure()
    fig.set_size_inches(18.5, 10.5)

    line_id = 0

    for hidden_nodes in hidden_nodes_list:
        print('Hidden Node Count:', hidden_nodes)

        r = RBM(num_visible=neuron_count, num_hidden=hidden_nodes)

        #training_data = np.copy(text_array_float)

        print('Training Data Shape:', training_data.shape)

        # rbm_wt_vec_list, wt_rec_interval = r.train(training_data, max_epochs = 5000)

        r.train(training_data, max_epochs=5000)

        print('RBM Weights', r.weights.shape)

        patt_miscl_noise = {}

        sim_count = 10
Example #12
def rbm_test(self):
    _rbm = RBM("RBM_test", 784, 10)
    _rbm.rbm_train(mnist.train.images, 128, 10)
    w, vb, hb = _rbm.get_param()
    print(hb)
Example #13
from rbm import RBM
from utils import load_mnist_data
import numpy as np
import PIL.Image as Image

if __name__ == '__main__':
    data = load_mnist_data()
    train_x, train_y, valid_x, valid_y, test_x, test_y = data

    print(train_x.shape)
    print(valid_x.shape)
    print(test_x.shape)

    M = RBM()
    #M.contrastive_divergence(validation = valid_x)
    #m = np.asarray([[1, 2, 3, 4], [2, 3, 4, 5], [2, 3, 1, 1]])
Example #14
from rbm import RBM
import tensorflow as tf
import input_data

flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', './mnistData/', 'Directory for storing data')

# First RBM
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
rbmobject1 = RBM(784, 100, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.001)

# Train First RBM
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(10)
    cost = rbmobject1.partial_fit(batch_xs)
    print(cost)

print("###########################################")
batch_xs, batch_ys = mnist.test.next_batch(10)
cost = rbmobject1.partial_fit(batch_xs)
print(cost)
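For comparison, scikit-learn's BernoulliRBM supports the same minibatch pattern through its own `partial_fit` (a stand-in sketch, not the `RBM` class used above):

import numpy as np
from sklearn.neural_network import BernoulliRBM

rbm = BernoulliRBM(n_components=100, learning_rate=0.001, random_state=0)
X = (np.random.rand(1000, 784) > 0.5).astype(float)  # toy stand-in for MNIST
for i in range(0, len(X), 10):
    rbm.partial_fit(X[i:i + 10])
print(rbm.score_samples(X[:10]).mean())  # pseudo-likelihood as a cost proxy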
Example #15
                                                    random_state=0)

best_params = {'n_components': 50, 'learning_rate': 0.02, 'batch_size': 100}
n_iter = 100
n_components = best_params['n_components']
learning_rate = best_params['learning_rate']
batch_size = best_params['batch_size']
verbose = True
random_state = 0
room_temp = 0.8
n_temp = 10

# Models we will use
rbm_pcd = RBM(random_state=random_state,
              verbose=verbose,
              learning_rate=learning_rate,
              n_iter=n_iter,
              n_components=n_components,
              batch_size=batch_size)
rbm_cd = RBM_CD(random_state=random_state,
                verbose=verbose,
                learning_rate=learning_rate,
                n_iter=n_iter,
                n_components=n_components,
                batch_size=batch_size,
                cd_k=1)
rbm_pt = RBM_PT(random_state=random_state,
                verbose=verbose,
                learning_rate=learning_rate,
                n_iter=n_iter,
                n_components=n_components,
                batch_size=batch_size,
Example #16
    def __init__(self,
                 numpy_rng,
                 theano_rng=None,
                 n_ins_mfcc=39 * N_FRAMES_MFCC,
                 n_ins_arti=60 * N_FRAMES_ARTI,
                 hidden_layers_sizes=[1024, 1024],
                 n_outs=42):
        """This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins_mfcc: int
        :param n_ins_mfcc: dimension of the MFCC input to the DBN

        :type n_ins_arti: int
        :param n_ins_arti: dimension of the articulatory input to the DBN

        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.n_ins_mfcc = n_ins_mfcc

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2**30))

        # allocate symbolic variables for the data
        #self.x_mfcc = T.fvector('x_mfcc') # TODO
        #self.x_arti = T.fvector('x_arti') # TODO
        self.x_mfcc = T.matrix('x_mfcc')
        self.x_arti = T.matrix('x_arti')
        self.y = T.ivector('y')  # the labels are presented as 1D vector
        # of [int] labels

        # The DBN is an MLP, for which all weights of intermediate
        # layers are shared with a different RBM.  We will first
        # construct the DBN as a deep multilayer perceptron, and when
        # constructing each sigmoidal layer we also construct an RBM
        # that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
        # training the DBN by doing stochastic gradient descent on the
        # MLP.

        for i in xrange(self.n_layers):
            if i == 0:
                layer_input = self.x_mfcc
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=n_ins_mfcc,
                                            n_out=hidden_layers_sizes[i],
                                            activation=T.nnet.sigmoid)
                self.sigmoid_layers.append(sigmoid_layer)
                self.params.extend(sigmoid_layer.params)
                rbm_layer = GRBM(numpy_rng=numpy_rng,
                                 theano_rng=theano_rng,
                                 input=layer_input,
                                 n_visible=n_ins_mfcc,
                                 n_hidden=hidden_layers_sizes[i],
                                 W=sigmoid_layer.W,
                                 hbias=sigmoid_layer.b)
                self.rbm_layers.append(rbm_layer)
            elif i == 1:
                layer_input = self.x_arti
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=n_ins_arti,
                                            n_out=hidden_layers_sizes[i],
                                            activation=T.nnet.sigmoid)
                self.sigmoid_layers.append(sigmoid_layer)
                self.params.extend(sigmoid_layer.params)
                rbm_layer = GRBM(numpy_rng=numpy_rng,
                                 theano_rng=theano_rng,
                                 input=layer_input,
                                 n_visible=n_ins_arti,
                                 n_hidden=hidden_layers_sizes[i],
                                 W=sigmoid_layer.W,
                                 hbias=sigmoid_layer.b)
                self.rbm_layers.append(rbm_layer)
            elif i == 2:
                input_size = hidden_layers_sizes[i - 2] + hidden_layers_sizes[i - 1]
                layer_input = T.concatenate(
                    [self.sigmoid_layers[-2].output,
                     self.sigmoid_layers[-1].output],
                    axis=1)  # TODO
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=hidden_layers_sizes[i],
                                            activation=T.nnet.sigmoid)
                self.sigmoid_layers.append(sigmoid_layer)
                self.params.extend(sigmoid_layer.params)
                rbm_layer = RBM(numpy_rng=numpy_rng,
                                theano_rng=theano_rng,
                                input=layer_input,
                                n_visible=input_size,
                                n_hidden=hidden_layers_sizes[i],
                                W=sigmoid_layer.W,
                                hbias=sigmoid_layer.b)
                self.rbm_layers.append(rbm_layer)
            else:
                input_size = hidden_layers_sizes[i - 1]
                layer_input = self.sigmoid_layers[-1].output
                sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                            input=layer_input,
                                            n_in=input_size,
                                            n_out=hidden_layers_sizes[i],
                                            activation=T.nnet.sigmoid)
                self.sigmoid_layers.append(sigmoid_layer)
                self.params.extend(sigmoid_layer.params)
                rbm_layer = RBM(numpy_rng=numpy_rng,
                                theano_rng=theano_rng,
                                input=layer_input,
                                n_visible=input_size,
                                n_hidden=hidden_layers_sizes[i],
                                W=sigmoid_layer.W,
                                hbias=sigmoid_layer.b)
                self.rbm_layers.append(rbm_layer)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs)
        self.params.extend(self.logLayer.params)

        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)

        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
Example #17
tf.flags.DEFINE_string("result_path", "result.txt","")
tf.flags.DEFINE_string("sep", "\t", "")
FLAGS = tf.flags.FLAGS

'''
comments:
train dataset/test dataset: "ml-100k/u1.base"/"ml-100k/u1.test"; columns: user_id, movie_id, rating, timestamp
profiles -- type: dict -- key: user_id, value: (movie_id, rating)
    stores each user's movie ratings
'''

if __name__ == "__main__":
    all_users, all_movies, tests = load_dataset(FLAGS.train_path, FLAGS.test_path,
                                                FLAGS.sep, user_based=True)
    rbm = RBM(len(all_movies) * 5, FLAGS.num_hidden)
    print("model created")
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)
    profiles = defaultdict(list)
    with open(FLAGS.train_path, 'rt') as data:
        for i, line in enumerate(data):
            uid, mid, rat, timestamp = line.strip().split(FLAGS.sep)
            profiles[uid].append((mid, float(rat)))
    print("Users and ratings loaded")
    for e in range(FLAGS.epochs):
        
        for batch_i, batch in enumerate(chunker(list(profiles.keys()),
                                                FLAGS.batch_size)):
            size = min(len(batch), FLAGS.batch_size)
Example #18
    else:
        use_pcd = (sys.argv[1] == 'pcd')
        cd_k = int(sys.argv[2])
        output_dir = None if len(sys.argv) == 3 else sys.argv[3]
        init_with_test = not use_pcd

    # decoder_dir = 'relu_deep_model1_relu_6'
    decoder_dir = 'noise_deep_model2'
    dataset = Cifar10Wrapper.load_from_h5(
        os.path.join(decoder_dir, 'encoded_cifar10.h5'))
    # print '>>>>>'
    # dataset.scale()

    batch_size = 100
    lr = 0.001 if use_pcd else 0.1

    # rbm = GaussianRBM(dataset.train_xs[0].size, 1000, output_dir)
    rbm = RBM(dataset.train_xs[0].size, 2000, output_dir)

    train(rbm,
          dataset,
          lr,
          500,
          batch_size,
          use_pcd,
          cd_k,
          output_dir,
          pcd_chain_size=100,
          decoder_dir=decoder_dir,
          init_with_test=init_with_test)
Example #19
flags.DEFINE_integer('epochs', 50, 'The number of training epochs')
flags.DEFINE_integer('batchsize', 30, 'The batch size')
flags.DEFINE_boolean('restore_rbm', False,
                     'Whether to restore the RBM weights or not.')

# ensure output dir exists
if not os.path.isdir('out'):
    os.mkdir('out')

mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX, teX = min_max_scale(trX, teX)

# RBMs
rbmobject1 = RBM(784, 900, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.3)
rbmobject2 = RBM(900, 500, ['rbmw2', 'rbvb2', 'rbmhb2'], 0.3)
rbmobject3 = RBM(500, 250, ['rbmw3', 'rbvb3', 'rbmhb3'], 0.3)
rbmobject4 = RBM(250, 2, ['rbmw4', 'rbvb4', 'rbmhb4'], 0.3)

if FLAGS.restore_rbm:
    rbmobject1.restore_weights('./out/rbmw1.chp')
    rbmobject2.restore_weights('./out/rbmw2.chp')
    rbmobject3.restore_weights('./out/rbmw3.chp')
    rbmobject4.restore_weights('./out/rbmw4.chp')

# Autoencoder
autoencoder = AutoEncoder(784, [900, 500, 250, 2],
                          [['rbmw1', 'rbmhb1'], ['rbmw2', 'rbmhb2'],
                           ['rbmw3', 'rbmhb3'], ['rbmw4', 'rbmhb4']],
                          tied_weights=False)
Example #20
# -*- coding: utf-8 -*-
from rbm import RBM
from au import AutoEncoder
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np

import PreprocessGenerative as i


inputData = i.importData()

# Train 4-Layer Deep Belief Network
print('DBN')

rbm1 = RBM(inputData[0].shape[0], 900, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.3)
rbm2 = RBM(900, 500, ['rbmw2', 'rbvb2', 'rbmhb2'], 0.3)
rbm3 = RBM(500, 250, ['rbmw3', 'rbvb3', 'rbmhb3'], 0.3)
rbm4 = RBM(250, 2,   ['rbmw4', 'rbvb4', 'rbmhb4'], 0.3)

epoch = 1

# Train First RBM
print('first rbm')

for g in range(epoch):
    for it in range(len(inputData)):
        trX = inputData[it][np.newaxis]
        rbm1.partial_fit(trX)
        print(rbm1.compute_cost(trX))
    print(rbm1.compute_cost(trX))
Example #21
    "/Users/matthewzeitlin/Desktop/CS156b-Netflix/data/train.tf.dta")

# test_set = tf.data.TextLineDataset("/home/CS156b-Netflix/data/probe.dta")

dataset = dset.map(_user_parse_function)
train_4_probe = train_4_probe.map(_user_parse_function)
full_train = full_train.map(_user_parse_function)
probe = probe.map(_test_parse_function)
#test_set = test_set.map(_test_parse_function)

#user_index_dataset = dset.map(_user_parse_function)

# a = model.predict(test_set, user_index_dataset)
# print(a)

rbm = RBM()
rbm.train(dataset, 50, probe, train_4_probe)

########### Submission ###############
exit(0)
saver = tf.contrib.eager.Saver(rbm.get_variables())
saver.restore("models522/rbm")

print("Predicting")
test_set = tf.data.TextLineDataset(
    "/home/ubuntu/CS156b-Netflix/deep_autoencoder/qual_edited.dta")
test_set = test_set.map(_test_parse_function)
rbm.pred_for_sub(test_set, dataset, True, "rbm_probe_qual.txt")
# print("Created submission for test set")
# rbm.pred_for_sub(full_train, full_train, True, "rbm_train.txt")
Example #22
ncol = data.shape[1]
data_target = data[...,0]
data = data[...,1:ncol]
num_cases = data.shape[0]
num_dims = data.shape[1]
num_vis = num_dims

perm = np.random.permutation(num_cases)
data = data[perm]
data_target = data_target[perm]
data_sh = theano.shared(np.asarray(data, dtype=theano.config.floatX), borrow=True)
data_target_sh = theano.shared(np.asarray(data_target, dtype=theano.config.floatX), borrow=True)



rbm = RBM(num_vis=num_dims, num_hid=500)
rbm_line = RBMBinLine(num_vis=500, num_hid=2)

# hyper parameters
train_params = { 'batch_size' : 100, 'learning_rate' : 0.1, 'cd_steps' : 2, 'max_epoch' : 25, 'persistent' : False}

train_rbm(rbm, data_sh, train_params, False, False)
fine_tune(rbm, data_sh, epochs = 10, batch_size=100)

# collect statistics
pre_sigm, hid_stat = theano.function([], rbm.prop_up(data_sh))()
hid_stat_sh = theano.shared(np.asarray(hid_stat, dtype=theano.config.floatX), borrow=True)

# hyper parameters
train_params = { 'batch_size' : 100, 'learning_rate' : 0.05, 'cd_steps' : 1, 'max_epoch' : 20, 'persistent' : False }
train_rbm(rbm_line, hid_stat_sh, train_params, False, False)
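The two-stage scheme here (a 500-unit RBM, then a 2-unit RBM trained on the first one's hidden statistics) can be imitated with scikit-learn purely to make the data flow concrete (a sketch, not the theano classes above):

import numpy as np
from sklearn.neural_network import BernoulliRBM

X = (np.random.rand(200, 64) > 0.5).astype(float)  # toy binary data
rbm_a = BernoulliRBM(n_components=500, random_state=0).fit(X)
codes = rbm_a.transform(X)                          # analogue of prop_up statistics
rbm_b = BernoulliRBM(n_components=2, random_state=0).fit(codes)
print(rbm_b.transform(codes).shape)                 # (200, 2) low-dimensional codes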
Example #23
    def fit(self, X, y):
        self.build_net(X.shape[1], len(np.unique(y)))

        # Assign weights of layers as views of the big weights
        if self.coef_ is None:
            ws = list()
            for layer in self.layers:
                ws.append(layer.b.reshape(-1))
                ws.append(layer.W.reshape(-1))
            self.coef_ = np.concatenate(tuple(ws))
            self.assign_weights()

        # Pretrain
        if self.pretrain_epochs > 0:
            if self.progress_bars:
                if self.pretrain_batches_per_epoch == -1:
                    batches_per_epoch = int(X.shape[0] /
                                            self.pretrain_batch_size)
                else:
                    batches_per_epoch = self.pretrain_batches_per_epoch

                maxiters = self.pretrain_epochs * batches_per_epoch * len(
                    self.layers)
                pt_bar = ProgressBar(max=maxiters, desc='Pretrain')

            if self.pretrain_batch_size == -1:
                # Use full-batch
                self.pretrain_batch_size = X.shape[0]

            # Create RBM layers using the same weights
            self.rbm_layers = []
            for i, layer in enumerate(self.layers):
                n_hid = layer.W.shape[1]
                new = RBM(layer)
                self.rbm_layers.append(new)

            # Actual pretrain
            for i, rbm_layer in enumerate(self.rbm_layers):
                for epoch in range(self.pretrain_epochs):
                    mb = MBOpti.minibatches(
                        X,
                        batch_size=self.pretrain_batch_size,
                        batches=self.pretrain_batches_per_epoch,
                        random_state=self.rnd)

                    for j, batch in enumerate(mb):
                        if i == 0:
                            input = batch
                        else:
                            # input = self.layers[i - 1].output(batch)
                            try:
                                input = self.layers[i - 1].sample_h_given_v(input)
                            except Exception:
                                print input.shape, self.layers[i - 1].W.shape
                                raise Exception('1')

                        rbm_layer.contrastive_divergence(input)
                        if self.progress_bars:
                            pt_bar.next()
                        if self.pretrain_callback is not None:
                            stop = self.pretrain_callback(
                                self, layer, epoch + 1, j + 1)
                            if stop == True:
                                break

            if self.progress_bars:
                pt_bar.complete()

        # Finetune
        if self.finetune_epochs > 0:
            if self.progress_bars:
                if self.finetune_batches_per_epoch == -1:
                    batches_per_epoch = int(X.shape[0] /
                                            self.finetune_batch_size)
                else:
                    batches_per_epoch = self.finetune_batches_per_epoch

                maxiters = self.finetune_epochs * batches_per_epoch
                ft_bar = ProgressBar(max=maxiters, desc='Finetune')

            def _callback(epoch, i):
                if self.progress_bars:
                    ft_bar.next()
                if self.finetune_callback is not None:
                    return self.finetune_callback(self, epoch, i)

            self.finetune_options = self.finetune_options.copy()
            args = (self.layers, len(np.unique(y)))
            MBOpti.minimize(self.coef_,
                            X,
                            y,
                            fun=cost,
                            grad=cost_prime,
                            weights=self.coef_,
                            method=self.finetune_method,
                            epochs=self.finetune_epochs,
                            batch_size=self.finetune_batch_size,
                            batches_per_epoch=self.finetune_batches_per_epoch,
                            options=self.finetune_options,
                            args=args,
                            callback=_callback,
                            random_state=self.rnd)

            if self.progress_bars:
                ft_bar.complete()
Example #24
localPropertyFinder = PropertyFinder(localQuerier)

effectsList = [{'effect': "", 'disease': ""}]

csvFile = open("result.csv")
trainingSet = DictReader(csvFile)


trainingRows = []
trainingData = [[0 for i in range(len(effectsList))] for j in range(TRAINING_SAMPLE_SIZE)]

index1=0
for row in trainingSet:
    if index1 < TRAINING_SAMPLE_SIZE:
        geneProperties = DictReader(localPropertyFinder.findGeneProperties(row['gene']))
        for prop in geneProperties:
            for index2, item in enumerate(effectsList):
                if prop == item:
                    trainingData[index1][index2] = 1
        drugProperties = DictReader(localPropertyFinder.findDrugProperties(row['drug']))
        for prop in drugProperties:
            for index2, item in enumerate(effectsList):
                if prop == item:
                    trainingData[index1][index2] = 1
    index1 = index1 + 1

rbm = RBM(num_visible=len(effectsList), num_hidden=30)
rbm.train(np.array(trainingData), max_epochs = 300)

csvFile.close()
Example #25
    # Read in the trained RBM parameters:
    path_to_params = 'data_ising2d/RBM_parameters_solutions/parameters_nH%d_L%d' % (
        num_hidden, L)
    path_to_params += '_T' + str(T) + '.npz'
    params = np.load(path_to_params)
    weights = params['weights']
    visible_bias = params['visible_bias']
    hidden_bias = params['hidden_bias']
    hidden_bias = np.reshape(hidden_bias, (hidden_bias.shape[0], 1))
    visible_bias = np.reshape(visible_bias, (visible_bias.shape[0], 1))

    # Initialize RBM class
    rbms.append(
        RBM(num_hidden=num_hidden,
            num_visible=num_visible,
            weights=weights,
            visible_bias=visible_bias,
            hidden_bias=hidden_bias,
            num_samples=num_samples))
    rbm_samples.append(rbms[i].stochastic_maximum_likelihood(gibb_updates))
#end of loop over temperatures

# Initialize tensorflow
init = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())

# Sample thermodynamic observables:
N = num_visible
with tf.Session() as sess:
    sess.run(init)

    for i in range(nbins):
        print('bin %d' % i)
Example #26
    def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
                 hidden_layers_sizes=[500, 500], n_outs=10, finetune_lr=0.1):
        # Build the same basic network structure as the original DBN

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        assert self.n_layers > 0
        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')   # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector
                                 # of [int] labels

        for i in xrange(self.n_layers):
            if i == 0:
                input_size = n_ins
                layer_input = self.x
            else:
                input_size = hidden_layers_sizes[i - 1]
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.nnet.sigmoid)

            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)

            self.params.extend(sigmoid_layer.params)

            # Construct an RBM that shares weights with this layer
            # The only difference: the first layer uses the modified GBRBM
            if i == 0:
                rbm_layer = GBRBM(input=layer_input, n_in=input_size,
                                  n_hidden=hidden_layers_sizes[i],
                                  W=None, hbias=None, vbias=None,
                                  numpy_rng=None, transpose=False,
                                  activation=T.nnet.sigmoid,
                                  theano_rng=None, name='grbm',
                                  W_r=None, dropout=0, dropconnect=0)
            else:
                rbm_layer = RBM(numpy_rng=numpy_rng,
                                theano_rng=theano_rng,
                                input=layer_input,
                                n_visible=input_size,
                                n_hidden=hidden_layers_sizes[i],
                                W=sigmoid_layer.W,
                                hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs)
        self.params.extend(self.logLayer.params)

        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)

        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)

        #################################################
        # Wudi change the annealing learning rate:
        #################################################
        self.state_learning_rate =  theano.shared(numpy.asarray(finetune_lr,
                                               dtype=theano.config.floatX),
                                               borrow=True)
Example #27
                                _a[0]: self._X,
                                y: self._Y
                            }))))


# Load the data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
# Create 3 RBM models
RBM_hidden_sizes = [500, 200, 50]
inpX = trX
# Store the models
rbm_list = []

# Number of inputs
input_size = inpX.shape[1]

# Start training the RBM models
print('Pre_train begins!')
for i, size in enumerate(RBM_hidden_sizes):
    print('RBM: ', i, ' ', input_size, '->', size)
    rbm_list.append(RBM(input_size, size))
    input_size = size
for rbm in rbm_list:
    print('New RBM:')
    rbm.train(inpX)
    inpX = rbm.rbm_outpt(inpX)
print('Train begins!')
nNet = NN(RBM_hidden_sizes, trX, trY)
nNet.load_from_rbms(RBM_hidden_sizes, rbm_list)
nNet.train()
Example #28
    def __init__(self, numpy_rng, theano_rng=None, n_ins=DIMENSION * N_FRAMES,
                 hidden_layers_sizes=[1024, 1024, 1024], n_outs=N_OUTS):
        """This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the DBN

        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        # allocate symbolic variables for the data
        self.x = T.matrix('x')  # the data is presented as rasterized images
        self.y = T.ivector('y')  # the labels are presented as 1D vector
                                 # of [int] labels

        # The DBN is an MLP, for which all weights of intermediate
        # layers are shared with a different RBM.  We will first
        # construct the DBN as a deep multilayer perceptron, and when
        # constructing each sigmoidal layer we also construct an RBM
        # that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
        # training the DBN by doing stochastic gradient descent on the
        # MLP.

        for i in xrange(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden
            # units of the layer below or the input size if we are on
            # the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the
            # hidden layer below or the input of the DBN if you are on
            # the first layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=T.nnet.sigmoid)

            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)

            # it's arguably a philosophical question...  but we are
            # going to only declare that the parameters of the
            # sigmoid_layers are parameters of the DBN. The visible
            # biases in the RBM are parameters of those RBMs, but not
            # of the DBN.
            self.params.extend(sigmoid_layer.params)

            # Construct an RBM that shares weights with this layer
            rbm_layer = RBM(numpy_rng=numpy_rng,
                            theano_rng=theano_rng,
                            input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layers_sizes[i],
                            W=sigmoid_layer.W,
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
            input=self.sigmoid_layers[-1].output,
            n_in=hidden_layers_sizes[-1],
            n_out=n_outs)
        self.params.extend(self.logLayer.params)

        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)

        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
Example #29
    def __init__(self,
                 numpy_rng,
                 theano_rng=None,
                 n_ins=784,
                 hidden_layers_sizes=[500, 500],
                 n_outs=1,
                 activation_method="Sigmoid"):
        """This class is made to support a variable number of layers.

        :type numpy_rng: numpy.random.RandomState
        :param numpy_rng: numpy random number generator used to draw initial
                    weights

        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams
        :param theano_rng: Theano random generator; if None is given one is
                           generated based on a seed drawn from `rng`

        :type n_ins: int
        :param n_ins: dimension of the input to the DBN

        :type hidden_layers_sizes: list of ints
        :param hidden_layers_sizes: intermediate layers size, must contain
                               at least one value

        :type n_outs: int
        :param n_outs: dimension of the output of the network
        """

        self.sigmoid_layers = []
        self.rbm_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)
        self.activation = T.nnet.sigmoid

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = MRG_RandomStreams(numpy_rng.randint(2**30))

        # allocate symbolic variables for the data

        # the data is presented as rasterized images
        self.x = T.matrix('x')

        # the targets are presented as a 1D vector of floats (regression)
        self.y = T.fvector('y')

        # end-snippet-1
        # The DBN is an MLP, for which all weights of intermediate
        # layers are shared with a different RBM.  We will first
        # construct the DBN as a deep multilayer perceptron, and when
        # constructing each sigmoidal layer we also construct an RBM
        # that shares weights with that layer. During pretraining we
        # will train these RBMs (which will lead to changing the
        # weights of the MLP as well). During finetuning we will finish
        # training the DBN by doing stochastic gradient descent on the
        # MLP.

        for i in range(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden
            # units of the layer below or the input size if we are on
            # the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the
            # hidden layer below or the input of the DBN if you are on
            # the first layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=self.activation)

            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)

            # it's arguably a philosophical question...  but we are
            # going to only declare that the parameters of the
            # sigmoid_layers are parameters of the DBN. The visible
            # biases in the RBM are parameters of those RBMs, but not
            # of the DBN.
            self.params.extend(sigmoid_layer.params)

            # Construct an RBM that shares weights with this layer
            rbm_layer = RBM(numpy_rng=numpy_rng,
                            theano_rng=theano_rng,
                            input=layer_input,
                            n_visible=input_size,
                            n_hidden=hidden_layers_sizes[i],
                            W=sigmoid_layer.W,
                            hbias=sigmoid_layer.b)
            self.rbm_layers.append(rbm_layer)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LinearRegression(input=self.sigmoid_layers[-1].output,
                                         n_in=hidden_layers_sizes[-1],
                                         n_out=n_outs,
                                         l2=0,
                                         l1=0)
        self.params.extend(self.logLayer.params)

        # compute the cost for second phase of training, defined as the
        # negative log likelihood of the logistic regression (output) layer
        self.finetune_cost = self.logLayer.negative_log_likelihood(self.y)

        # compute the gradients with respect to the model parameters
        # symbolic variable that points to the number of errors made on the
        # minibatch given by self.x and self.y
        self.errors = self.logLayer.errors(self.y)
Example #30
import numpy as np
from rbm import GibbsSampler, RBM

no_visible = 10
no_hidden = 5

# RBM
rbm = RBM(no_visible=no_visible, no_hidden=no_hidden)

# Gibbs sampler
g = GibbsSampler()

# Starting batch = random
batch_size = 3
visible_batch = np.random.randint(low=0, high=2, size=(batch_size, no_visible))
print("Starting visible:")
print(visible_batch)

hidden_batch = g.sample_hidden_given_visible(visible_batch=visible_batch,
                                             bias_hidden_tf=rbm.bias_hidden,
                                             weights_tf=rbm.weights,
                                             binary=True)
print("Visible -> hidden:")
print(hidden_batch)

visible_batch = g.sample_visible_given_hidden(hidden_batch=hidden_batch,
                                              bias_visible_tf=rbm.bias_visible,
                                              weights_tf=rbm.weights,
                                              binary=True)
print("Hidden -> visible:")
print(visible_batch)
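Chaining the two calls above gives a k-step Gibbs sampler over the same `GibbsSampler`/`RBM` objects, which is exactly the loop CD-k training repeats:

def gibbs_chain(g, rbm, visible_batch, k=10):
    # Alternate hidden/visible sampling k times.
    for _ in range(k):
        hidden_batch = g.sample_hidden_given_visible(
            visible_batch=visible_batch, bias_hidden_tf=rbm.bias_hidden,
            weights_tf=rbm.weights, binary=True)
        visible_batch = g.sample_visible_given_hidden(
            hidden_batch=hidden_batch, bias_visible_tf=rbm.bias_visible,
            weights_tf=rbm.weights, binary=True)
    return visible_batch

print("After a 10-step Gibbs chain:")
print(gibbs_chain(g, rbm, visible_batch))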