def pretrain_rbm_layers(v, validation_v=None, n_hidden=[], gibbs_steps=[], batch_size=[], num_epochs=[], learning_rate=[], probe_epochs=[]):
    rbm_layers = []
    n_rbm = len(n_hidden)
    # create rbm layers
    for i in range(n_rbm):
        rbm = RBM(n_hidden=n_hidden[i],
                    gibbs_steps=gibbs_steps[i],
                    batch_size=batch_size[i],
                    num_epochs=num_epochs[i],
                    learning_rate=learning_rate[i],
                    probe_epochs=probe_epochs[i])
        rbm_layers.append(rbm)
    # pretrain rbm layers
    layer_input = v
    validation_input = validation_v
    for i, rbm in enumerate(rbm_layers):
        print('### pretraining RBM Layer {i}'.format(i=i))
        rbm.fit(layer_input, validation_input)
        # propagate upward: hidden samples become the next layer's input
        output = rbm.sample_h_given_v(layer_input, rbm.params['W'], rbm.params['c'])
        if validation_input is not None:
            validation_output = rbm.sample_h_given_v(validation_input, rbm.params['W'], rbm.params['c'])
        else:
            validation_output = None
        layer_input = output
        validation_input = validation_output
    return rbm_layers
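The hyperparameters above are parallel lists with one entry per stacked layer. A minimal usage sketch, assuming the RBM class defined alongside this function; the data and settings here are illustrative:

import numpy as np

v = (np.random.rand(100, 50) > 0.5).astype(float)  # 100 binary samples, 50 visible units
rbm_layers = pretrain_rbm_layers(v,
                                 n_hidden=[32, 16],  # stack: 50 -> 32 -> 16
                                 gibbs_steps=[1, 1],
                                 batch_size=[10, 10],
                                 num_epochs=[5, 5],
                                 learning_rate=[0.1, 0.1],
                                 probe_epochs=[1, 1])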
Example #2
    def fit_network(self, X, labels=None):
        if labels is None:
            labels = numpy.zeros((X.shape[0], 2))
        self.layers = []
        temp_X = X
        for j in range(self.num_layers):

            print "\nTraining Layer %i" % (j + 1)
            print "components: %i" % self.components[j]
            print "batch_size: %i" % self.batch_size[j]
            print "learning_rate: %0.3f" % self.learning_rate[j]
            print "bias_learning_rate: %0.3f" % self.bias_learning_rate[j]
            print "epochs: %i" % self.epochs[j]
            print "Sparsity: %s" % str(self.sparsity_rate[j])
            print "Sparsity Phi: %s" % str(self.phi)
            if j != 0:
                self.plot_weights = False

            model = RBM(n_components=self.components[j], batch_size=self.batch_size[j],
                        learning_rate=self.learning_rate[j], regularization_mu=self.sparsity_rate[j],
                        n_iter=self.epochs[j], verbose=True, learning_rate_bias=self.bias_learning_rate[j],
                        plot_weights=self.plot_weights, plot_histograms=self.plot_histograms, phi=self.phi)

            # only the top layer is trained with labels (labels is never None
            # here, since a zero matrix was substituted above)
            if j + 1 == self.num_layers and labels is not None:
                model.fit(numpy.asarray(temp_X), numpy.asarray(labels))
            else:
                model.fit(numpy.asarray(temp_X))

            temp_X = model._mean_hiddens(temp_X)  # hidden activations given visible units
            print("Trained Layer %i\n" % (j + 1))

            self.layers.append(model)
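In the loop above, each trained RBM's mean hidden activations become the next layer's input, and only the top layer sees labels. A hypothetical companion helper in the same style (the name transform_network is illustrative, not part of the original class) to push new data through the fitted stack:

    def transform_network(self, X):
        # reuse _mean_hiddens, as in fit_network, layer by layer
        temp_X = X
        for model in self.layers:
            temp_X = model._mean_hiddens(temp_X)
        return temp_X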
Example #3

    def test_run(self):
        r = RBM(num_input=6, num_hidden=2)
        training_data = np.array([[1, 1, 1, 0, 0, 0], [1, 0, 1, 0, 0, 0],
                                  [1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0],
                                  [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]])
        r.fit(training_data, max_epochs=1000)
        user = np.array([[0, 0, 0, 1, 1, 0]])
        r.run_visible(user)
Example #4

    def test_predict(self):
        rbm = RBM(num_input=2, num_hidden=1, num_output=2)

        train_data = np.asarray([[0, 0], [1, 1], [0, 1], [1, 0]])
        train_label = np.asarray([[1, 0], [1, 0], [0, 1], [0, 1]])

        rbm.fit(train_data, train_label, max_epoch=1000)
        # print(rbm.run_visible([[0, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 1]]))
        print(rbm.free_energy(np.array([[1, 0, 1, 0], [1, 0, 0, 1]])))
        print(rbm.predict(train_data))
Example #5
    def fit(self, X, y):
        self._setup_input(X, y)
        self.n_classes = self._set_n_classes()
        self.y = self.y.reshape(-1, 1)

        x_train = self.X
        for hidden_unit in self.hidden_layers:
            layer = RBM(n_hidden=hidden_unit, lr=self.rbm_lr,
                        batch_size=self.batch_size, max_epochs=self.rbm_epochs, active_func=self.active_func)
            layer.fit(x_train)
            self.rbm_layers.append(layer)
            x_train = layer.predict(x_train)
        self._fine_tuning(self.X, self.y)
Example #6

    def test_free_energy(self):
        r = RBM(num_input=6, num_hidden=2)
        training_data = np.array([[1, 1, 1, 0, 0, 0], [1, 0, 1, 0, 0, 0],
                                  [1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0],
                                  [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0]])
        r.fit(training_data, max_epochs=1000)
        f = r.free_energy(np.array([[1, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 0]]))
        self.assertLess(f[1], f[0])
        f = r.free_energy(np.array([[1, 1, 0, 0, 1, 0], [1, 0, 1, 0, 0, 0]]))
        self.assertLess(f[1], f[0])
        f = r.free_energy(np.array([[1, 0, 1, 0, 1, 1], [0, 0, 1, 1, 1, 0]]))
        self.assertLess(f[1], f[0])
        f = r.free_energy(np.array([[0, 1, 1, 0, 1, 0], [1, 1, 1, 0, 0, 0]]))
        self.assertLess(f[1], f[0])
        f = r.free_energy(np.array([[0, 1, 1, 1, 1, 0], [0, 0, 1, 1, 0, 0]]))
        self.assertLess(f[1], f[0])
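The assertions above rely on the standard binary-RBM free energy, F(v) = -v.b - sum_j log(1 + exp(c_j + (v.W)_j)): inputs resembling the training data should come out lower. A minimal NumPy sketch of that formula, independent of the RBM class under test (parameter names W, b, c are illustrative):

import numpy as np

def free_energy(v, W, b, c):
    # v: (n, n_visible); W: (n_visible, n_hidden); b, c: visible/hidden biases
    # np.logaddexp(0, x) is a numerically stable log(1 + exp(x))
    return -(v @ b) - np.logaddexp(0, c + v @ W).sum(axis=1)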
Example #7
# (the start of this snippet is cut off; by analogy with rbm_lptp below it
# presumably constructs the parallel-tempering model; the class name RBM_PT
# is an assumption)
rbm_pt = RBM_PT(random_state=random_state,
                verbose=verbose,
                learning_rate=learning_rate,
                n_iter=n_iter,
                n_components=n_components,
                batch_size=batch_size,
                n_temperatures=n_temp,
                room_temp=room_temp)
rbm_lptp = RBM_LPTOC(random_state=random_state,
                     verbose=verbose,
                     learning_rate=learning_rate,
                     n_iter=n_iter,
                     n_components=n_components,
                     batch_size=batch_size,
                     n_temperatures=n_temp,
                     room_temp=room_temp)

# Training RBMs
dataset = 'MNIST'
rbm_pcd.fit(X_train, Y_train)
np.save("data/rbm_pcd_weights" + dataset, rbm_pcd.components_)
np.save("data/rbm_pcd_visible_bias" + dataset, rbm_pcd.intercept_visible_)
np.save("data/rbm_pcd_hidden_bias" + dataset, rbm_pcd.intercept_hidden_)
plt.plot(np.arange(1, rbm_pcd.n_iter + 1), rbm_pcd.log_like, label='PCD')

rbm_cd.fit(X_train, Y_train)
np.save("data/rbm_cd_weights" + dataset, rbm_cd.components_)
np.save("data/rbm_cd_visible_bias" + dataset, rbm_cd.intercept_visible_)
np.save("data/rbm_cd_hidden_bias" + dataset, rbm_cd.intercept_hidden_)
plt.plot(np.arange(1, rbm_cd.n_iter + 1), rbm_cd.log_like, label='CD')

rbm_pt.fit(X_train, Y_train)
np.save("data/rbm_pt_weights" + dataset, rbm_pt.components_)
np.save("data/rbm_pt_visible_bias" + dataset, rbm_pt.intercept_visible_)
np.save("data/rbm_pt_hidden_bias" + dataset, rbm_pt.intercept_hidden_)
Example #8

             'step_config': 1,
             'learning_rate': 0.1,
             'weight_decay': 0}

# initialize model object
rbm = RBM(layers=layers)

if args.model_file:
    assert os.path.exists(args.model_file), '%s not found' % args.model_file
    logger.info('loading initial model state from %s' % args.model_file)
    rbm.load_weights(args.model_file)

# setup standard fit callbacks
callbacks = Callbacks(rbm, train_set, output_file=args.output_file,
                      progress_bar=args.progress_bar)

# add a callback to calculate

if args.serialize > 0:
    # add a callback for saving a checkpoint file
    # every args.serialize epochs
    checkpoint_schedule = args.serialize
    checkpoint_model_path = args.save_path
    callbacks.add_serialize_callback(checkpoint_schedule, checkpoint_model_path)

rbm.fit(train_set, optimizer=optimizer, num_epochs=num_epochs, callbacks=callbacks)

# compute hidden representations for the first validation minibatch only
for mb_idx, (x_val, y_val) in enumerate(valid_set):
    hidden = rbm.fprop(x_val)
    break
Example #9
              'sparse_cost': 0.001,
              'sparse_target': 0.01,
              'persistant': False,
              'kPCD': 1,
              'use_fast_weights': False
              }
n_epochs = 1

init = GlorotUniform()

# it seems that the data have shape 30x30x30, though I think it should be 24 with padding=2
layers = [RBMConvolution3D([6, 6, 6, 48], strides=2, padding=0, init=init, name='l1_conv'),
          RBMConvolution3D([5, 5, 5, 160], strides=2, padding=0, init=init, name='l2_conv'),
          RBMConvolution3D([4, 4, 4, 512], strides=2, padding=0, init=init, name='l3_conv'),
          RBMLayer(1200, init=init, name='l4_rbm'),
          RBMLayerWithLabels(4000, n_classes, name='l5_rbm_with_labels')]

rbm = RBM(layers=layers)

# callbacks = Callbacks(rbm, data, output_file='./output.hdf5')
callbacks = Callbacks(rbm, data)


t = time.time()
rbm.fit(data, optimizer=parameters, num_epochs=n_epochs, callbacks=callbacks)
t = time.time() - t
print "Training time: ", t
Example #10
def rbm():
    X = np.random.uniform(0, 1, (1500, 10))
    rbm = RBM(n_hidden=10, max_epochs=200, batch_size=10, lr=0.05)
    rbm.fit(X)
    print_curve(rbm.errors)
Example #11

# In[6]:

shape = observed_data.variables["Prcp"][:].shape
lt = 176-1
ln = 23-1
y = observed_data.variables["Prcp"][:, lt, ln]
normalized_gridded = (gridded - gridded[:400].mean(axis=0)) / gridded[:400].std(axis=0)
#normalized_gridded = (normalized_gridded.T - normalized_gridded.T.mean(axis=0)) / normalized_gridded.T.std(axis=0)
#normalized_gridded = normalized_gridded.T

def expit(x, beta=1):
    return 1 / (1 + numpy.exp(-beta * x))

squashed_gridded = expit(normalized_gridded, beta=1)
height, bins = numpy.histogram(squashed_gridded, bins=100)
pyplot.bar(bins[:-1], height, width=1/100.)
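# the logistic squash maps the normalized values into (0, 1), the range a
# Bernoulli-unit RBM expects; the histogram is a quick sanity check of that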

pyplot.imshow(squashed_gridded[13].reshape(nlat,nlon))


# In[7]:

boltzmann = RBM(n_iter=100, plot_histograms=True, verbose=True, n_components=500)
boltzmann.fit(squashed_gridded)


Example #12
from rbm import RBM
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

#Loading in the mnist data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

#Img size
RBM_visible_sizes = 784
RBM_hidden_sizes = 600

#build and test a model with MNIST data
rbm = RBM(RBM_visible_sizes, RBM_hidden_sizes, verbose=1)
rbm.fit(trX, teX)
Example #13
from rbm import RBM
import matplotlib.pyplot as plt

model = RBM(train_x.shape[1],
            10,
            visible_unit_type='gauss',
            main_dir='/Users/sensei/Desktop/project2/',
            model_name='rbm_model.ckpt',
            gibbs_sampling_steps=4,
            learning_rate=0.001,
            momentum=0.95,
            batch_size=512,
            num_epochs=10,
            verbose=1)

model.fit(train_x, validation_set=test_x)

test_cost = model.getFreeEnergy(test_x).reshape(-1)

from sklearn.metrics import roc_curve, auc

fpr, tpr, _ = roc_curve(test_y, test_cost)
roc_auc = auc(fpr, tpr)

plt.plot(fpr,
         tpr,
         color='darkorange',
         lw=2,
         label='ROC curve (area = %0.2f)' % roc_auc)  # trailing arguments assumed
Example #14
class ARBM_RBM:
    def __init__(
            self,
            n_visible,
            n_hidden,
            n_adaptive,
            sample_visible=False,
            learning_rate=0.01,
            momentum=0.95,
            cdk_level=1,
    ):
        print("ARBM_RBM")
        print("\tThis class mimics the normal RBM.")
        self.rbm = RBM(
            n_visible=n_visible,
            n_hidden=n_hidden,
            learning_rate=learning_rate,
            momentum=momentum,
            cdk_level=cdk_level,
        )

    def fit(
            self,
            data,
            n_epochs=10,
            batch_size=10,
            shuffle=True,
            verbose=True,
    ) -> np.ndarray:
        return self.rbm.fit(
            data=np.transpose(data[0]),
            n_epochs=n_epochs,
            batch_size=batch_size,
            shuffle=shuffle,
            verbose=verbose,
        )

    def convert(
            self,
            source_label,
            source_data,
            target_label,
    ):
        # labels are ignored: a plain RBM can only reconstruct, not convert
        return np.transpose(self.rbm.reconstruct(np.transpose(source_data)))

    def add_speaker(
            self,
            speaker_data,
    ):
        print("\tCannot add speaker.")
        return -1

    def save(
            self,
            filename,
    ):
        pass

    def load(
            self,
            filename,
    ):
        pass

    @staticmethod
    def load_model(filename):
        return ARBM_RBM(0, 0, 0)
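A hedged usage sketch of the wrapper (names and shapes are illustrative; per the transposes in fit and convert, data[0] is laid out features x samples):

import numpy as np

arbm = ARBM_RBM(n_visible=20, n_hidden=8, n_adaptive=0)
features = np.random.rand(20, 100)          # 20 features x 100 samples
errors = arbm.fit((features,), n_epochs=5)  # fit transposes data[0] internally
recon = arbm.convert(0, features, 0)        # labels are ignored by the mimic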