Example No. 1
    def test_consistency_val(self):
        rbm1 = BernoulliRBM(max_epoch=2,
                            model_path='test_rbm_1/',
                            **self.rbm_config)
        rbm2 = BernoulliRBM(max_epoch=2,
                            model_path='test_rbm_2/',
                            **self.rbm_config)

        rbm1.fit(self.X, self.X_val)
        rbm2.fit(self.X, self.X_val)

        self.compare_weights(rbm1, rbm2)
        self.compare_transforms(rbm1, rbm2)

        # cleanup
        self.cleanup()
Example No. 2
import numpy as np
from sklearn.neural_network import BernoulliRBM


def trainRBM(data, n_components, n_iter, batch_size, learning_rate=0.01):
    acc = []
    err = np.zeros((n_iter, 2))

    rbm = BernoulliRBM(verbose=True,
                       batch_size=batch_size,
                       random_state=1,
                       n_components=n_components)

    n_features = len(data['X'][0])
    rbm.learning_rate = learning_rate

    # Initialize the weight matrix with small (normally distributed) random
    # values; hidden and visible biases are initialized to 0.
    rbm.components_ = np.random.randn(n_components, n_features) * 0.1
    rbm.intercept_hidden_ = np.zeros((n_components, ))
    rbm.intercept_visible_ = np.zeros((n_features, ))

    rbm.n_iter = 1
    for i in range(n_iter):
        rbm.fit(data['X'])
        # One-step Gibbs reconstructions; both are drawn from the training
        # data, so the two columns of `err` differ only by sampling noise.
        test = rbm.gibbs(data['X'])
        train = rbm.gibbs(data['X'])
        err[i, 1] = np.sum(
            (test - data['X'])**2) / (n_features * len(data['X']))
        err[i, 0] = np.sum(
            (train - data['X'])**2) / (n_features * len(data['X']))

    return rbm, err
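
A minimal usage sketch for trainRBM, assuming the caller supplies a dict with an 'X' entry of rows scaled to [0, 1]; the digits loading and the parameter values below are illustrative assumptions, not part of the original code:

from sklearn import datasets

X, _ = datasets.load_digits(return_X_y=True)
X = (X - X.min(0)) / (X.max(0) + 1e-4)   # scale features to [0, 1]

# Hypothetical call; n_components, n_iter and batch_size are arbitrary here.
rbm, err = trainRBM({'X': X}, n_components=64, n_iter=10, batch_size=10)
print(err[:, 0])   # per-iteration mean squared reconstruction error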
Example No. 3
def rbm(self, factory, op_mode):
    iterations = self.c_iterations if op_mode < BernoulliRBM.op_mode_quantum else self.q_iterations
    if factory is BernoulliRBM:
        return BernoulliRBM(tester=self,
                            learning_rate=self.lr,
                            n_iter=iterations,
                            n_components=self.rbm_hidden,
                            verbose=True,
                            batch_size=1000,
                            op_mode=op_mode)
    elif factory is PCDRBM:
        return PCDRBM(tester=self,
                      visible=self.dataset_categories[self.cur_dataset],
                      hidden=self.rbm_hidden,
                      particles=-1,
                      iterations=iterations,
                      epochs=10000,
                      step=self.lr,
                      weight_decay=0,
                      op_mode=op_mode)
Example No. 4

# Load Data
X, y = datasets.load_digits(return_X_y=True)
X = np.asarray(X, 'float32')
X, Y = nudge_dataset(X, y)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling

X_train, X_test, Y_train, Y_test = train_test_split(X,
                                                    Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Models we will use
logistic = linear_model.LogisticRegression(solver='newton-cg', tol=1)
rbm = BernoulliRBM(Saver(), random_state=0, verbose=True)

rbm_features_classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])

# #############################################################################
# Training

# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 10
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
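
The comment above notes that these hyper-parameters came from a grid search; a minimal sketch of such a search over the pipeline, with candidate values that are illustrative assumptions rather than the grid actually used:

from sklearn.model_selection import GridSearchCV

# Hypothetical parameter grid; the values below are assumptions for illustration.
param_grid = {
    'rbm__learning_rate': [0.01, 0.06, 0.1],
    'rbm__n_iter': [10, 20],
    'rbm__n_components': [50, 100, 200],
    'logistic__C': [1.0, 100.0, 6000.0],
}
search = GridSearchCV(rbm_features_classifier, param_grid, cv=3, n_jobs=-1)
search.fit(X_train, Y_train)
print(search.best_params_)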
Example No. 5
def assignment4_2_DBN():
    data = loadAll()
    n_iter = 75
    n_iter_mlp = 50

    nodes = []  #[150, 100] #, 50]
    no_of_layers = len(nodes)

    learning_rate = 0.01

    batch_size = 200

    train = data['X']
    test_img = data['plot_ims']
    rbms = []
    pipe = []
    acc = []

    prev_layer_size = len(train[0])

    for i in range(no_of_layers):

        rbm = BernoulliRBM(n_components=nodes[i],
                           verbose=True,
                           batch_size=batch_size,
                           random_state=1)
        rbm.learning_rate = learning_rate
        n_features = len(train[0])
        rbm.components_ = np.random.randn(nodes[i], prev_layer_size) * 0.1
        rbm.intercept_hidden_ = np.zeros((nodes[i], ))
        rbm.intercept_visible_ = np.zeros((prev_layer_size, ))
        rbm.n_iter = n_iter

        #train = rbm.fit_transform(train)  # Enable to start pre-training

        rbms.append(rbm)
        pipe.append(('rbm{}'.format(i), rbm))

        prev_layer_size = nodes[i]

    mlp = MLPClassifier(
        solver='sgd',
        random_state=1,
        learning_rate="adaptive",
        learning_rate_init=0.01,
        hidden_layer_sizes=(784, 10),  #(nodes[no_of_layers-1], 10),
        max_iter=n_iter_mlp,
        verbose=True)

    #print(pipe)
    pipe.append(('mlp', mlp))
    print(pipe)

    clsf = Pipeline(pipe)

    clsf.fit(data['X'], np.ravel(data['T_trn']))

    predicted_classes = clsf.predict(data['X_tst'])

    acc.append(
        np.sum(data['T_tst'].T == predicted_classes) / len(data['T_tst']) *
        100)

    print(acc)

    joblib.dump(clsf, 'dbn_{}l.pkl'.format(no_of_layers))
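
The persisted pipeline can be restored later with joblib.load; a minimal sketch, assuming the run above used the empty nodes list (so the file is named dbn_0l.pkl) and that loadAll is importable from the same module:

import joblib

clsf = joblib.load('dbn_0l.pkl')    # name written by joblib.dump above when no_of_layers == 0
data = loadAll()                    # same custom loader used in assignment4_2_DBN
print(clsf.predict(data['X_tst']))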