Example #1
def test_sample_hiddens():
    rng = np.random.RandomState(0)
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=2, batch_size=5, n_iter=5, random_state=42)
    rbm1.fit(X)

    # Exact hidden-unit means vs. the empirical mean of 100 Bernoulli samples.
    h = rbm1._mean_hiddens(X[0])
    hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)

    assert_almost_equal(h, hs, decimal=1)
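This test (it appears to come from scikit-learn's RBM test suite) checks that averaging many Bernoulli draws from _sample_hiddens approaches the exact unit means returned by _mean_hiddens. The snippets here omit their setup; a minimal sketch of the missing preamble, assuming the data preparation used in scikit-learn's own test module:

import numpy as np
from numpy.testing import assert_almost_equal
from sklearn.datasets import load_digits
from sklearn.neural_network import BernoulliRBM

# Digits data scaled into [0, 1], as the tests expect.
Xdigits, _ = load_digits(return_X_y=True)
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()

Note that the public transform method computes the same hidden-unit means as the private _mean_hiddens helper.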
Example #2
def test_gibbs():
    rng = np.random.RandomState(42)
    X = Xdigits[:100]
    rbm1 = BernoulliRBM(n_components=2, batch_size=5,
                        n_iter=5, random_state=rng)
    rbm1.fit(X)

    # One public gibbs() step vs. a manual hidden -> visible sampling step,
    # each averaged over many draws.
    Xt1 = np.mean([rbm1.gibbs(X[0]) for i in range(100)], 0)
    Xt2 = np.mean([rbm1._sample_visibles(rbm1._sample_hiddens(X[0], rng), rng)
                   for i in range(1000)], 0)

    assert_almost_equal(Xt1, Xt2, decimal=1)
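The test above verifies that one public gibbs() step is statistically equivalent to manually sampling hiddens and then visibles. A minimal sketch of running a longer chain with the public API, assuming rbm1 and X as in the test body (this usage is an illustration, not part of the original test):

# Run a 10-step Gibbs chain starting from a training example;
# each gibbs() call samples h given v, then v given h.
v = X[0]
for _ in range(10):
    v = rbm1.gibbs(v)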
Example #3
print("Debut training RBM1")
print(X_train.shape)
t0 = time.clock()
rbm_layer_1.fit(X_train)
print(time.clock() - t0)

# creation d'une base de train a partir d'echantillonnage
# de variable cachees du premier rbm
n_sample_second_layer_training = int(X.shape[0])
H1_train = np.zeros(shape=(n_sample_second_layer_training, rbm_layer_1.n_components))
H1_label_train = np.zeros(shape = (n_sample_second_layer_training, 1))
comp = 0
while (comp < n_sample_second_layer_training):
    rng = check_random_state(rbm_layer_1.random_state)
    randTemp = rd.randint(0, X.shape[0] - 1)
    H1_train[comp] = rbm_layer_1._sample_hiddens(X[randTemp], rng)
    H1_label_train[comp] = Y[randTemp]
    comp = comp + 1
 

#-------------------- Training du second rbm --------------------

# grid_search pour determiner parametres optimaux du deuxieme RBM.
grid_search_test = False
if grid_search_test:
    # Models we will use
    logistic = linear_model.LogisticRegression() # pour comparaison avec RBM + regression logistique
    rbm = BernoulliRBM(random_state=0, verbose=True)
    classifier = Pipeline(steps=[('rbm_layer_2', rbm), ('logistic', logistic)])

    parameters = {'rbm_layer_2__learning_rate': np.linspace(0.04, 0.05, num=10)}
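The snippet ends before the search is run. A minimal sketch of one way to complete it inside the grid_search_test branch (GridSearchCV is standard scikit-learn, but this completion is an assumption, not the original author's code):

from sklearn.model_selection import GridSearchCV

# Fit the RBM + logistic regression pipeline over the learning-rate grid.
search = GridSearchCV(classifier, parameters, cv=3)
search.fit(H1_train, H1_label_train.ravel())
print(search.best_params_)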
Example #4
# Assumes X, X_train and two BernoulliRBM instances rbm_layer_1 and
# rbm_layer_2 are defined earlier in the original script.
import time
import random as rd  # rd assumed to be the stdlib random module
import numpy as np
from sklearn.utils import check_random_state

rbm_layer_1.n_components = 300
# Training RBM
print("Start of RBM1 training")
print(X_train.shape)
t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
rbm_layer_1.fit(X_train)
print(time.perf_counter() - t0)

# Build a training set for the second layer by sampling the hidden
# variables of the first RBM.
n_sample_second_layer_training = 3 * int(X.shape[0])
H1_train = np.zeros(shape=(n_sample_second_layer_training, rbm_layer_1.n_components))
comp = 0
while comp < n_sample_second_layer_training:
    # Same caveat as above: an int random_state is re-seeded on every pass.
    rng = check_random_state(rbm_layer_1.random_state)
    randTemp = rd.randint(0, X.shape[0] - 1)
    H1_train[comp] = rbm_layer_1._sample_hiddens(X[randTemp], rng)
    comp = comp + 1

# Training the second RBM
rbm_layer_2.learning_rate = 0.01
rbm_layer_2.n_iter = 50
rbm_layer_2.n_components = 300
# Training RBM
print("Start of RBM2 training")
print(H1_train.shape)
t0 = time.perf_counter()
rbm_layer_2.fit(H1_train)
print(time.perf_counter() - t0)

# Extract the first layer's weights and hidden biases.
rbm1w = rbm_layer_1.components_.T          # shape (n_features, n_components)
bias1h = rbm_layer_1.intercept_hidden_
bias1h = bias1h.reshape(bias1h.size, 1)    # as a column vector
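A minimal sketch of what the extracted parameters give you: with W = components_.T and b = intercept_hidden_, the first layer's hidden-unit probabilities are sigmoid(v @ W + b), which matches rbm_layer_1.transform. The check below reuses the variables above and is an illustration, not part of the original script:

from scipy.special import expit  # logistic sigmoid

# Recompute the hidden-unit activation probabilities by hand.
H1_probs = expit(X_train @ rbm1w + bias1h.ravel())
# equivalent to rbm_layer_1.transform(X_train)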