Example #1
def network_test():
    # X1, X2 and Y1, Y2 are assumed to be pre-loaded input and target arrays.
    # Layer '0' spells out every supported field; the later layers rely on defaults.
    layer_dict = {'0':
        {'n_neuron': 10,
         'incoming_layer_list': [],
         'incoming_weight_list': [],
         'bias': None,
         'loss': 'cross_entropy',
         'act_func_name': 'sigmoid',
         'value': None,
         'layer_type': 'input',
         'back_error': 0,
         'drop_rate': 0.0,
         'link2input': X1,
         'link2target': None
         },
        '1': {'n_neuron': 5, 'layer_type': 'input', 'link2input': X2},
        '2': {'n_neuron': 20, 'incoming_layer_list': [0, 1], 'act_func_name': 'sigmoid',
              'layer_type': 'hidden', 'drop_rate': 0.2},
        '3': {'n_neuron': 2, 'incoming_layer_list': [2], 'loss': 'mse', 'act_func_name': 'linear',
              'layer_type': 'output', 'link2target': Y1},
        '4': {'n_neuron': 3, 'incoming_layer_list': [2], 'act_func_name': 'softmax',
              'layer_type': 'output', 'link2target': Y2}
        }
    network = NeuralNetwork(5, layer_dict=layer_dict)  # 5 = number of layers defined above
    #outputs = network.feed_forward()
    #error = network.empirical_error()
    return network.fit(batch_size=20, learning_rate=step_iterator(0.1, 0.01, 0), isnorm=True,
                       weight_decay=step_iterator(1e-5, 1e-5, 0), n_iter=500, random_state=20,
                       switch_point=500)
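The step_iterator helper used for the learning-rate and weight-decay schedules is not shown in these examples. Judging from the calls, it yields values that walk from a start value toward an end value in fixed steps and then hold; a step of 0 gives a constant schedule. A minimal sketch under exactly those assumptions (name and semantics inferred from usage, not confirmed by the library):

def step_iterator(start, end, step):
    # Assumed reimplementation, inferred from the calls in these examples.
    # Yields start, start+step, ... and clamps at end; step=0 is constant.
    value = start
    while True:
        yield value
        if step != 0:
            value += step
            # Clamp once the schedule passes its end value, in either direction.
            if (step > 0 and value >= end) or (step < 0 and value <= end):
                value = end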
Example #2
    def fit(self, batch_size=100, learning_rate=None, weight_decay=None, momentum=None,
            n_iter=100, verbose=True, random_state=None, switch_point=None, isnorm=True,
            only_dropout=False):

        # Default schedules: constant learning rate, no weight decay, no momentum.
        learning_rate = step_iterator(0.1, 0.1, 0) if learning_rate is None else learning_rate
        weight_decay = step_iterator(0, 0, 0) if weight_decay is None else weight_decay
        momentum = step_iterator(0, 0, 0) if momentum is None else momentum
        # By default the update rule never switches away from plain gradient descent.
        switch_point = n_iter if switch_point is None else switch_point

        rng = check_random_state(random_state)
        for layer in self.layer_list:
            layer.assign_random_state(rng)

        fit_func = self._fit
        for iteration in xrange(n_iter):
            pl = 0.
            # Advance each hyper-parameter schedule by one step per epoch.
            lr = learning_rate.next()
            wd = weight_decay.next()
            mom = momentum.next()
            # Optional per-epoch shuffling of the training rows (disabled):
            # r_index = np.arange(self.input_layer_list[0].link2input.shape[0])
            # np.random.shuffle(r_index)
            # for inp_layer in self.input_layer_list:
            #     inp_layer.link2input = inp_layer.link2input[r_index]
            # for out_layer in self.output_layer_list:
            #     out_layer.link2target = out_layer.link2target[r_index]
            if verbose:
                begin = time()

            # only_dropout pins the dropout-only update rule for the whole run.
            fit_func = self._fit_dropout if only_dropout else fit_func
            for batch_no in self.generate_batch(batch_size):
                pl_batch = fit_func(learning_rate=lr, weight_decay=wd, momentum=mom,
                                    max_lr_iter=5, isnorm=isnorm)
                if verbose:
                    pl += pl_batch.sum()
            # After switch_point epochs, hand the updates over to the minimizer.
            fit_func = self._fit if iteration < switch_point else self._fit_with_minimize
            if verbose:
                end = time()
                print("Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
                      % (iteration, pl, end - begin))
        return self
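The inner loop iterates over self.generate_batch(batch_size), which is not shown here. From its usage it appears to slice the arrays linked to the input and output layers into consecutive minibatches, load each slice into the layers, and yield the batch index. A hypothetical sketch under those assumptions; the per-batch attribute names (value, batch_target) are placeholders, not confirmed by the library:

    def generate_batch(self, batch_size):
        # Hypothetical sketch; the real method is not shown in these examples.
        n_samples = self.input_layer_list[0].link2input.shape[0]
        for batch_no, start in enumerate(xrange(0, n_samples, batch_size)):
            sl = slice(start, start + batch_size)
            for inp_layer in self.input_layer_list:
                inp_layer.value = inp_layer.link2input[sl]          # placeholder name
            for out_layer in self.output_layer_list:
                out_layer.batch_target = out_layer.link2target[sl]  # placeholder name
            yield batch_no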
Example #3
RBMlayers_dict = {0: {'dimension': X_train_bin.shape[1],
                      'bias': None,
                      'value': None,
                      'layer_type': 'binary',
                      'layer_name': 'input'},
                  1: {'dimension': 200,
                      'bias': None,
                      'value': None,
                      'layer_type': 'binary',
                      'layer_name': 'hidden'}}
# Decaying learning rate, constant weight decay, momentum ramping from 0.5 to 0.9.
learning_rate, weight_decay, momentum = step_iterator(0.01, 0.001, -0.002), step_iterator(2e-5, 2e-5, 0), step_iterator(0.5, 0.9, 0.05)
batch_func = batch_func_generator(X_train_bin, batch_size=100)
rbm = BinRBM(layers_dict=RBMlayers_dict, weight_list=None, random_state=None)
print 'Training starts'
# Train with CD-1 (PCD=False, k=1), monitoring the reconstruction error.
rbm.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
        weight_decay=weight_decay, k=1, perst_size=100, n_iter=500, verbose=True)
# Project the train and test sets into the learned hidden representation.
combined = rbm.transform(X_train_bin)
combined_test = rbm.transform(X_test_bin)

# Grid-search a linear SVM (hinge-loss SGD) on the RBM features.
parameters = {'alpha': [1e-1, 1e-2, 1e-3, 1e-4, 1e-5], 'n_iter': [10, 50, 100]}
clf = GridSearchCV(SGDClassifier(loss="hinge", penalty="l2", n_iter=100, random_state=500), parameters)
clf.fit(combined, labels_train)
y_pred = clf.predict(combined_test)
print 'total: ', labels_test.shape[0], 'correct: ', (y_pred == labels_test).sum()
print clf.best_estimator_
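batch_func_generator is likewise not defined in these snippets. The fit methods above take its result as their first argument, so it presumably packages the data into minibatches. A minimal sketch, assuming it returns a zero-argument function that serves one random minibatch per call (the real helper may instead yield sequential batches):

import numpy as np

def batch_func_generator(X, batch_size=100):
    # Assumed helper, inferred from usage only.
    n_samples = X.shape[0]
    def batch_func():
        # Serve one random minibatch of rows from X per call.
        idx = np.random.randint(0, n_samples, batch_size)
        return X[idx]
    return batch_func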
Example #4
# RBM1layers_dict and RBM2layers_dict are assumed to be defined earlier in the script.
RBM3layers_dict = {0: {'dimension': RBM2layers_dict[1]['dimension'],
                       'bias': None,
                       'value': None,
                       'layer_type': 'binary',
                       'layer_name': 'input'},
                   1: {'dimension': 2000,
                       'bias': None,
                       'value': None,
                       'layer_type': 'binary',
                       'layer_name': 'hidden'}}
# First RBM: raw inputs -> first hidden layer.
learning_rate, weight_decay, momentum = step_iterator(0.1, 0.1, 0), step_iterator(2e-4, 2e-4, 0), step_iterator(0.5, 0.9, 0.05)
batch_func = batch_func_generator(X_train, batch_size=100)
rbm0 = BinRBM(layers_dict=RBM1layers_dict, weight_list=None, random_state=None)
rbm0.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=50, verbose=True)
X_train = rbm0.transform(X_train, sample=False)

# Second RBM: first hidden layer -> second hidden layer.
learning_rate, weight_decay, momentum = step_iterator(0.1, 0.1, 0), step_iterator(2e-4, 2e-4, 0), step_iterator(0.5, 0.9, 0.05)
batch_func = batch_func_generator(X_train, batch_size=100)
rbm1 = BinRBM(layers_dict=RBM2layers_dict, weight_list=None, random_state=None)
rbm1.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=50, verbose=True)
X_train = rbm1.transform(X_train, sample=False)

# Third RBM: second hidden layer -> 2000-unit top layer (the fit call is
# completed by analogy with the two blocks above; the original snippet breaks off here).
learning_rate, weight_decay, momentum = step_iterator(0.1, 0.1, 0), step_iterator(2e-4, 2e-4, 0), step_iterator(0.5, 0.9, 0.05)
batch_func = batch_func_generator(X_train, batch_size=100)
rbm2 = BinRBM(layers_dict=RBM3layers_dict, weight_list=None, random_state=None)
rbm2.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=50, verbose=True)
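The three train-then-transform blocks above repeat the same greedy layer-wise pretraining step. Under the same assumptions (RBM1layers_dict and RBM2layers_dict defined earlier in the script), the stack can also be written as a loop; a sketch:

X_layer = X_train  # the raw training data, before any transform
trained_rbms = []
for layers_dict in (RBM1layers_dict, RBM2layers_dict, RBM3layers_dict):
    # Fresh schedules per layer, matching the constants used above.
    learning_rate = step_iterator(0.1, 0.1, 0)
    weight_decay = step_iterator(2e-4, 2e-4, 0)
    momentum = step_iterator(0.5, 0.9, 0.05)
    batch_func = batch_func_generator(X_layer, batch_size=100)
    rbm = BinRBM(layers_dict=layers_dict, weight_list=None, random_state=None)
    rbm.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate,
            momentum=momentum, weight_decay=weight_decay, k=1, perst_size=100,
            n_iter=50, verbose=True)
    # Mean hidden activations become the input to the next layer.
    X_layer = rbm.transform(X_layer, sample=False)
    trained_rbms.append(rbm)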
Example #5
# Standardize train and test together, then split back apart;
# the first 50000 rows are the training split.
X_all = np.vstack((X_train, X_test))
scl = StandardScaler()
X_all = scl.fit_transform(X_all)
X_train = X_all[:50000]
X_test = X_all[50000:]
del X_all
X_train_copy = X_train.copy()
X_test_copy = X_test.copy()

## Intended layer sizes for the full stack: 3072-8192, 8192-4096, 4096-2048, 2048-1024, 1024-512
random_state = 500
RBMlayers_dict = {
    0: {"dimension": X_train.shape[1], "bias": None, "value": None, "layer_type": "linear", "layer_name": "input"},
    1: {"dimension": 8192, "bias": None, "value": None, "layer_type": "binary", "layer_name": "hidden"},
}
learning_rate, weight_decay, momentum = step_iterator(1, 1, 0), step_iterator(0, 0, 0), step_iterator(0, 0, 0)
batch_func = batch_func_generator(X_train, batch_size=100)
rbm = GaussRBM(layers_dict=RBMlayers_dict, weight_list=None, random_state=random_state)
print "Training starts"
rbm.fit(
    batch_func,
    PCD=False,
    error_function="recon",
    learning_rate=learning_rate,
    momentum=momentum,
    weight_decay=weight_decay,
    k=1,
    perst_size=100,
    n_iter=50,
    verbose=True,
)
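GaussRBM models real-valued visible units under a Gaussian noise model (the 'linear' layer_type), which is why the data are standardized to zero mean and unit variance before training. After fitting, features can be extracted the same way as in the binary examples; a usage sketch (the sample=False keyword is taken from Example #4):

H_train = rbm.transform(X_train, sample=False)
H_test = rbm.transform(X_test, sample=False)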
Example #6
# One Gaussian-input RBM per color channel; RBMRlayers_dict and RBMGlayers_dict
# are assumed to be defined like RBMBlayers_dict, for the R and G channels.
RBMBlayers_dict = {0: {'dimension': B_train.shape[1],
                       'bias': None,
                       'value': None,
                       'layer_type': 'linear',
                       'layer_name': 'input'},
                   1: {'dimension': 500,
                       'bias': None,
                       'value': None,
                       'layer_type': 'binary',
                       'layer_name': 'hidden'}}

# Constant schedules, shared by all three fits (step 0, so sharing is harmless).
learning_rate, weight_decay, momentum = step_iterator(1, 1, 0), step_iterator(0, 0, 0), step_iterator(0, 0, 0)
batch_func = batch_func_generator(R_train, batch_size=50)
rbmR = GaussRBM(layers_dict=RBMRlayers_dict, weight_list=None, random_state=random_state)
print 'Training starts'
rbmR.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
# Optional sparsity penalty (disabled):
#        sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)
batch_func = batch_func_generator(G_train, batch_size=50)
rbmG = GaussRBM(layers_dict=RBMGlayers_dict, weight_list=None, random_state=random_state)
print 'Training starts'
rbmG.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
#        sparsity_cond=True, sparsity_target=0.01, sparsity_lambda=1e-6)
batch_func = batch_func_generator(B_train, batch_size=50)
rbmB = GaussRBM(layers_dict=RBMBlayers_dict, weight_list=None, random_state=random_state)
print 'Training starts'
# (fit call completed by analogy with rbmR and rbmG; the original snippet breaks off here)
rbmB.fit(batch_func, PCD=False, error_function='recon', learning_rate=learning_rate, momentum=momentum,
         weight_decay=weight_decay, k=1, perst_size=100, n_iter=20, verbose=True)
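Example #6 ends with one trained Gaussian RBM per color channel. A plausible next step, mirroring the combined features of Example #3, is to concatenate the three hidden representations into a single feature matrix; a sketch, not part of the original snippet:

import numpy as np

combined_train = np.hstack((rbmR.transform(R_train),
                            rbmG.transform(G_train),
                            rbmB.transform(B_train)))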