import unittest

import numpy as np

import NN
import activation_functions as af  # module name assumed; provides ReLU and sigmoid


class Test(unittest.TestCase):
    # Random stand-ins for MNIST-sized data: 100 training and 50 test samples.
    trainData = np.random.rand(100, 784)
    trainLabel = np.random.randint(10, size=(100,), dtype=np.uint8)
    train = [trainData, trainLabel]
    testData = np.random.rand(50, 784)
    testLabel = np.random.randint(10, size=(50,), dtype=np.uint8)
    test = [testData, testLabel]
    nn = NN.NN(training=train, testing=test, lr=0.003, mu=.99,
               minibatch=100, dropout=1, disableLog=True)

    def test_num_hidden_layers(self):
        self.nn.addLayers([10, 10], ['relu', 'relu'])
        self.assertEqual(self.nn.nHidden, 2)

    def test_relu(self):
        # ReLU of an all-zero matrix is all zeros, so min == max.
        z = np.zeros((5, 5))
        r = af.ReLU(z)
        self.assertTrue(r.min() == r.max())

    def test_sigmoid_zero(self):
        self.assertEqual(af.sigmoid(0), 0.5)

    def test_sigmoid_max(self):
        self.assertAlmostEqual(af.sigmoid(1000), 1)

    def test_sigmoid_min(self):
        self.assertAlmostEqual(af.sigmoid(-1000), 0)
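# Standard unittest entry point so the suite can be run directly
# (e.g. `python -m unittest` from the repository root).
if __name__ == '__main__':
    unittest.main()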
from sklearn.model_selection import KFold


def mod_sel():
    # Grid search over the two hidden-layer sizes (20-unit steps, up to 300),
    # scoring each pair with 3-fold cross-validation.
    kf = KFold(n_splits=3, random_state=42, shuffle=True)
    x = TRAINING[0]
    means = []
    end = kf.get_n_splits(x)
    n1 = 20
    n2 = 40
    while n2 <= 300:
        while n1 <= 300:
            i = 0
            acc = []
            for train_index, test_index in kf.split(x):
                train_set = [TRAINING[0][train_index], TRAINING[1][train_index]]
                val_set = [TRAINING[0][test_index], TRAINING[1][test_index]]
                nn = NN.NN(training=train_set, testing=val_set,
                           lr=0.003, mu=.99, minibatch=100)
                nn.addLayers([n1, n2], ['relu', 'relu'])
                _, acc_val = nn.train(stop_function=0, num_epochs=120)
                i += 1
                acc.append(acc_val)
                if i == end:
                    # All folds done: record the mean validation accuracy.
                    m = [str([n1, n2]), np.mean(acc)]
                    print('Mean accuracy on validation: ' + str(m))
                    means.append(m)
            n1 += 20
        n2 += 20
        n1 = 20
    return means
def mod_sel(n1, n2):
    # Cross-validate a single (n1, n2) architecture with 3-fold CV.
    kf = KFold(n_splits=3, random_state=42, shuffle=True)
    x = TRAINING[0]
    means = []
    end = kf.get_n_splits(x)
    i = 0
    acc = []
    for train_index, test_index in kf.split(x):
        train_set = [TRAINING[0][train_index], TRAINING[1][train_index]]
        val_set = [TRAINING[0][test_index], TRAINING[1][test_index]]
        nn = NN.NN(training=train_set, testing=val_set,
                   lr=0.003, mu=.99, minibatch=100)
        nn.addLayers([n1, n2], ['relu', 'relu'])
        _, acc_val = nn.train(stop_function=0, num_epochs=120)
        i += 1
        acc.append(acc_val)
        if i == end:
            m = [str([n1, n2]), round(np.mean(acc), 2)]
            print('Mean accuracy on validation: ' + str(m))
            means.append(m)
    return means
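# Example driver (a sketch, not in the original source): sweep the same
# 20-unit grid as the loop-based mod_sel above and collect all results.
all_means = []
for n2 in range(40, 301, 20):
    for n1 in range(20, 301, 20):
        all_means.extend(mod_sel(n1, n2))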
# Earlier run, left commented out: relu hidden layers with a tanh output.
'''
for n in [[250, 100]]:
    nn = NN.NN(training=TRAINING, testing=TESTING, lr=0.003, mu=.99, minibatch=100)
    NN.NN.update_layers = normal_upd
    nn.addLayers(n, ['relu', 'relu', 'tanh'])
    a, b = nn.train(stop_function=0, num_epochs=150)
    w = nn.getWeigth()
    for p in [10, 20]:
        print("Pruning=" + str(p) + "%")
        w1 = np.copy(w)
        pr.set_pruned_layers(nn, p, w1)
        nn.train(stop_function=0, num_epochs=50)
'''

# Train a [250, 100] network, then prune 10% and 20% of the weights and
# retrain for 50 epochs after each pruning step.
for n in [[250, 100]]:
    nn = NN.NN(training=TRAINING, testing=TESTING, lr=0.003, mu=.99, minibatch=100)
    NN.NN.update_layers = normal_upd  # standard update rule for the full training run
    nn.addLayers(n, ['leakyrelu', 'leakyrelu', 'sigmoid'])
    a, b = nn.train(stop_function=0, num_epochs=150)
    w = nn.getWeigth()
    for p in [10, 20]:
        print("Pruning=" + str(p) + "%")
        w1 = np.copy(w)  # prune from the original trained weights each time
        pr.set_pruned_layers(nn, p, w1)
        nn.train(stop_function=0, num_epochs=50)
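# pr.set_pruned_layers is defined elsewhere in the project; as a point of
# reference, a minimal sketch of magnitude pruning (hypothetical helper,
# assuming the weights are a list of 2-D numpy arrays): zero out the p% of
# entries with the smallest absolute value in each layer.
def magnitude_prune(weights, p):
    pruned = []
    for w in weights:
        cutoff = np.percentile(np.abs(w), p)      # p-th percentile of |w|
        pruned.append(w * (np.abs(w) >= cutoff))  # keep only the large weights
    return pruned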
import time
from math import floor

dim_set = len(bin_data)

# Target for each sample is its normalized position in the data set.
labels = np.linspace(1, len(bin_data), num=len(bin_data), dtype=np.float64)
labels = labels / len(bin_data)
labels = np.reshape(labels, (-1, 1))

# Shuffle data and labels with the same fixed permutation.
p = np.random.RandomState(seed=42).permutation(dim_set)
bin_data_perm = bin_data[p]
labels_perm = labels[p]

now = time.time()
nn = NN.NN(training=[bin_data_perm, labels_perm], testing=[[0], [0]],
           lr=0.005, mu=0.9, lambd=1e-6, minibatch=64, disableLog=True)
# file3, lr=0.005 --> 4 48s
# file7, lr=0.005 --> 40 28s
nn.addLayers([256], ['leakyrelu', 'leakyrelu'])
nn.train(stop_function=2, num_epochs=20000)
later = time.time()
difference = int(later - now)

# print("R2: {}".format(r2_score(labels, nn.predict(bin_data))))
max_err = 0
mean_err = 0
for j in range(dim_set):
    pr = floor(nn.predict(bin_data[j])[0] * dim_set)
    val = abs(pr - labels[j] * dim_set)
    if val > max_err:
        max_err = val
    mean_err += val
from math import floor

from sklearn.metrics import r2_score

dim_set = len(bin_data)

labels = np.linspace(1, len(bin_data), num=len(bin_data), dtype=np.float64)
labels = labels / len(bin_data)
labels = np.reshape(labels, (-1, 1))

p = np.random.RandomState(seed=42).permutation(dim_set)
bin_data_perm = bin_data[p]
labels_perm = labels[p]

# Smaller inputs need a lower learning rate; i is presumably set by an
# enclosing loop over the input files.
lr = 0.003 if i > 2 else 0.0005
nn = NN.NN(training=[bin_data_perm, labels_perm], testing=[[0], [0]],
           lr=lr, mu=0.9, lambd=1.e-5, minibatch=64, disableLog=True)
nn.addLayers([256], ['leakyrelu', 'tanh'])
nn.train(stop_function=2, num_epochs=20000)

print("R2: {}".format(r2_score(labels, nn.predict(bin_data))))

# Track the largest and the summed absolute position error over the data set.
max_err = 0
mean_err = 0
for j in range(dim_set):
    pr = floor(nn.predict(bin_data[j])[0] * dim_set)
    val = abs(pr - labels[j] * dim_set)
    if val > max_err:
        max_err = val
    mean_err += val
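# Follow-up (a sketch): mean_err holds the sum of absolute errors after the
# loop, so divide by the number of samples before reporting.
mean_err /= dim_set
print("Max error: {}  Mean error: {}".format(max_err, mean_err))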