Code example #1
File: cifar_deep_auto.py — project: umutekmekci/deepNN
X_all = scl.fit_transform(X_all)
X_train = X_all[:50000]
X_test = X_all[50000:]
del X_all
X_train_copy = X_train.copy()
X_test_copy = X_test.copy()

## 3072-8192, 8192-4096, 4096-2048, 2048-1024, 1024-512
random_state = 500
RBMlayers_dict = {
    0: {"dimension": X_train.shape[1], "bias": None, "value": None, "layer_type": "linear", "layer_name": "input"},
    1: {"dimension": 8192, "bias": None, "value": None, "layer_type": "binary", "layer_name": "hidden"},
}
learning_rate, weight_decay, momentum = step_iterator(1, 1, 0), step_iterator(0, 0, 0), step_iterator(0, 0, 0)
batch_func = batch_func_generator(X_train, batch_size=100)
rbm = GaussRBM(layers_dict=RBMlayers_dict, weight_list=None, random_state=random_state)
print "Training starts"
rbm.fit(
    batch_func,
    PCD=False,
    error_function="recon",
    learning_rate=learning_rate,
    momentum=momentum,
    weight_decay=weight_decay,
    k=1,
    perst_size=100,
    n_iter=50,
    verbose=True,
)
#          sparsity_cond = True, sparsity_target = 0.01, sparsity_lambda = 1e-6)
Code example #2
File: dbn_pixel.py — project: umutekmekci/deepNN
                            'bias': None,
                            'value': None,
                            'layer_type': 'linear',
                            'layer_name': 'input'
                         },
                      1: {
                            'dimension':500,     
                            'bias': None,
                            'value': None,
                            'layer_type': 'binary',
                            'layer_name': 'hidden'
                         } }

learning_rate, weight_decay, momentum= step_iterator(1,1,0), step_iterator(0,0,0), step_iterator(0,0,0)
batch_func = batch_func_generator(R_train, batch_size = 50)
rbmR = GaussRBM(layers_dict = RBMRlayers_dict, weight_list = None, random_state = random_state)
print 'Training starts'
rbmR.fit(batch_func, PCD = False, error_function = 'recon',learning_rate = learning_rate, momentum = momentum,
                weight_decay = weight_decay, k = 1, perst_size = 100, n_iter = 20, verbose = True)
      #          sparsity_cond = True, sparsity_target = 0.01, sparsity_lambda = 1e-6)
batch_func = batch_func_generator(G_train, batch_size = 50)
rbmG = GaussRBM(layers_dict = RBMGlayers_dict, weight_list = None, random_state = random_state)
print 'Training starts'
rbmG.fit(batch_func, PCD = False, error_function = 'recon',learning_rate = learning_rate, momentum = momentum,
                weight_decay = weight_decay, k = 1, perst_size = 100, n_iter = 20, verbose = True)
     #           sparsity_cond = True, sparsity_target = 0.01, sparsity_lambda = 1e-6)
batch_func = batch_func_generator(B_train, batch_size = 50)
rbmB = GaussRBM(layers_dict = RBMBlayers_dict, weight_list = None, random_state = random_state)
print 'Training starts'
rbmB.fit(batch_func, PCD = False, error_function = 'recon',learning_rate = learning_rate, momentum = momentum,
                weight_decay = weight_decay, k = 1, perst_size = 100, n_iter = 20, verbose = True)