Code Example #1
File: cifar_deep_auto.py  Project: umutekmekci/deepNN
# (This excerpt opens inside the tail of an earlier rbm.fit(...) call that
# trains the first RBM in the stack; the opening of the call lies above the
# excerpt window.)
    momentum=momentum,
    weight_decay=weight_decay,
    k=1,
    perst_size=100,
    n_iter=50,
    verbose=True,
)
# Optional sparsity penalty, left disabled in the project source:
#     sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)

rbm_list = []
rbm_list.append(rbm)  # keep the first trained RBM; deeper layers follow below

dimen = 8192
for _ in xrange(4):  # greedily stack four more RBMs, halving the hidden size each time
    dimen //= 2
    X_train = rbm.transform(X_train)  # the previous layer's hidden activations feed the next RBM
    RBMlayers_dict = {
        0: {"dimension": X_train.shape[1], "bias": None, "value": None, "layer_type": "binary", "layer_name": "input"},
        1: {"dimension": dimen, "bias": None, "value": None, "layer_type": "binary", "layer_name": "hidden"},
    }
    # Per-epoch schedules: the learning rate decays from 0.1 toward 0.01,
    # weight decay stays at 0, and momentum ramps up from 0.1 to 0.9.
    learning_rate, weight_decay, momentum = (
        step_iterator(0.1, 0.01, -0.02),
        step_iterator(0, 0, 0),
        step_iterator(0.1, 0.9, 0.1),
    )
    batch_func = batch_func_generator(X_train, batch_size=100)
    rbm = BinRBM(layers_dict=RBMlayers_dict, weight_list=None, random_state=random_state)
    print "Training starts"
    rbm.fit(
        batch_func,
        PCD=False,
        # The excerpt is cut off here; the remaining kwargs are restored from
        # the call tail at the top of this excerpt and the analogous fit
        # calls in dbn_pixel.py (Code Example #2).
        error_function="recon",
        learning_rate=learning_rate,
        momentum=momentum,
        weight_decay=weight_decay,
        k=1,
        perst_size=100,
        n_iter=50,
        verbose=True,
    )
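Both excerpts rely on two project helpers, step_iterator and batch_func_generator, that are defined elsewhere in umutekmekci/deepNN and fall outside these windows. The sketch below is only a guess at their behavior, inferred from the call sites above (a schedule generator that walks a hyperparameter from a start value toward an end value in fixed steps, and a factory returning a mini-batch iterator); the project's actual implementations may differ.

def step_iterator(start, end, step):
    # Hypothetical sketch: yield start, start + step, ..., then hold at end.
    # Inferred from step_iterator(0.1, 0.01, -0.02) (learning-rate decay) and
    # step_iterator(0.1, 0.9, 0.1) (momentum ramp-up).
    value = start
    while (step > 0 and value < end) or (step < 0 and value > end):
        yield value
        value += step
    while True:  # hold the final value for any remaining epochs
        yield end

def batch_func_generator(X, batch_size=100):
    # Hypothetical sketch: return a callable that iterates over mini-batches of X.
    def batch_func():
        for i in xrange(0, X.shape[0], batch_size):
            yield X[i:i + batch_size]
    return batch_func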
Code Example #2
File: dbn_pixel.py  Project: umutekmekci/deepNN
# (This excerpt opens inside the tail of an earlier rbmR.fit(...) call; rbmR is
# created above the excerpt window, presumably as a GaussRBM on R_train.)
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
#        sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)
batch_func = batch_func_generator(G_train, batch_size=50)
rbmG = GaussRBM(layers_dict=RBMGlayers_dict, weight_list=None, random_state=random_state)
print 'Training starts'
rbmG.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
#        sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)
batch_func = batch_func_generator(B_train, batch_size=50)
rbmB = GaussRBM(layers_dict=RBMBlayers_dict, weight_list=None, random_state=random_state)
print 'Training starts'
rbmB.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
#        sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)
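# Note on the sparsity_cond / sparsity_target / sparsity_lambda kwargs commented
# out after each fit call: they point to an optional sparsity penalty. A common
# formulation (an assumption here, not confirmed from the project source) adds
# sparsity_lambda * (mean hidden activation - sparsity_target) to the
# hidden-bias gradient, nudging average hidden activity toward the target.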

# Use each channel's trained RBM as a fixed feature extractor.
rbmR_hidden, rbmG_hidden, rbmB_hidden = rbmR.transform(R_train), rbmG.transform(G_train), rbmB.transform(B_train)
rbmR_hidden_test, rbmG_hidden_test, rbmB_hidden_test = rbmR.transform(R_test), rbmG.transform(G_test), rbmB.transform(B_test)

clf = svm.SVC(kernel='linear', C=100, random_state=random_state)
clf.fit(np.hstack((rbmR_hidden, rbmG_hidden, rbmB_hidden)), labels_train)
y_pred = clf.predict(np.hstack((rbmR_hidden_test, rbmG_hidden_test, rbmB_hidden_test)))
print 'total: ', labels_test.shape[0], 'correct: ', (y_pred == labels_test).sum()

clf = svm.SVC(kernel='rbf', gamma=0.01, C=10, random_state=random_state)
clf.fit(np.hstack((rbmR_hidden, rbmG_hidden, rbmB_hidden)), labels_train)
y_pred = clf.predict(np.hstack((rbmR_hidden_test, rbmG_hidden_test, rbmB_hidden_test)))
print 'total: ', labels_test.shape[0], 'correct: ', (y_pred == labels_test).sum()
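Throughout both examples the trained RBMs serve only as fixed feature extractors: transform maps each sample to its hidden-layer representation, and the per-channel features are concatenated with np.hstack before the SVM. As a rough sketch of what transform presumably computes for a binary hidden layer (assuming a standard RBM with weight matrix W and hidden bias c; the project's actual method may differ):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def rbm_transform(X, W, hidden_bias):
    # Hypothetical sketch of rbm.transform(X): hidden activation probabilities
    # p(h = 1 | v) = sigmoid(vW + c) for a binary hidden layer.
    # Shapes: X (n_samples, n_visible), W (n_visible, n_hidden), hidden_bias (n_hidden,)
    return sigmoid(np.dot(X, W) + hidden_bias)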



# The excerpt is truncated here, at the start of the next layer definition:
RBMlayers_dict = {0:     {