Ejemplo n.º 1
0
def cross_fold(X, Y, n, *model_args, **model_kwargs):
    """Run n-fold cross-validation of NeuralNet on (X, Y).

    For each fold i, trains a fresh NeuralNet on the other n-1 folds and
    prints: fold index, training score, held-out test score, epochs trained.

    Parameters:
        X: 2-D feature array, indexable as X[rows, :].
        Y: 1-D label/target array aligned with X's rows.
        n: number of folds. NOTE: np.split raises ValueError unless
           len(Y) is evenly divisible by n.
        *model_args, **model_kwargs: forwarded to the NeuralNet constructor.
    """
    num_examples = len(Y)
    # shuffle indices so the folds are a random partition of the data
    idx = np.arange(num_examples)
    np.random.shuffle(idx)
    # split shuffled indices into n equal folds
    splits = np.split(idx, n)
    for i in range(n):
        # fold i is the test set; every other fold is training data
        idx_test = splits[i]
        idx_train = np.concatenate(splits[:i] + splits[i + 1:])
        X_train = X[idx_train, :]
        X_test = X[idx_test, :]
        Y_train = Y[idx_train]
        Y_test = Y[idx_test]
        # fresh model per fold so folds do not leak into each other
        model = NeuralNet(*model_args, **model_kwargs)
        num_epochs = model.fit(X_train, Y_train)
        # BUG FIX: original computed model.predict(X_test) but never used it
        # and printed only the TRAINING score; report the held-out score too.
        print(i, model.score(X_train, Y_train),
              model.score(X_test, Y_test), num_epochs)
Ejemplo n.º 2
0
def run_blobs(n=5):
    """Train a NeuralNet on blob data sets and print accuracy and epochs.

    Uses sets 0-1 for training, sets 2-3 for validation, and the last
    set for scoring.
    """
    X, Y = get_blob_sets(n)
    # 4 inputs -> 20 hidden -> 6 outputs, sigmoid activation
    model = NeuralNet(
        4,
        20,
        6,
        a_func=Sigmoid,
        validation_set=(X[2:4], Y[2:4]),
        multi_vsets=True,
    )
    epochs = model.fit(X[:2], Y[:2], True)
    accuracy = model.score(X[-1], Y[-1], multi_sets=False)
    print("accuracy:", accuracy)
    print("epochs:", epochs)
Ejemplo n.º 3
0
 def test__weight_matrices(self):
     """_weight_matrices([5, 20, 3]) yields one weight and one bias matrix
     per consecutive layer pair, with matching shapes."""
     W, b = NeuralNet._weight_matrices([5, 20, 3])
     weight_shapes = [(5, 20), (20, 3)]
     bias_shapes = [(1, 20), (1, 3)]
     self.assertEqual(len(W), len(weight_shapes))
     self.assertEqual(len(b), len(bias_shapes))
     for matrix, shape in zip(W, weight_shapes):
         self.assertEqual(matrix.shape, shape)
     for bias, shape in zip(b, bias_shapes):
         self.assertEqual(bias.shape, shape)
Ejemplo n.º 4
0
Archivo: main.py Proyecto: mles2/UM
def display_f_test(inputs_from_feature_selection, dataset):
    """Print the combined 5x2cv f-test statistic (f, p) for each pair of
    classifiers: NN vs KNN, NN vs SVM, KNN vs SVM.

    A fresh classifier instance is built for every comparison, as in the
    original pairwise calls.
    """
    # factories so each comparison gets newly constructed classifiers
    make_nn = lambda: NeuralNet(10000).clf
    make_knn = lambda: Knn(2).clf
    make_svm = lambda: Svm().clf
    print("ftest results: (f, p) =")
    comparisons = (
        (" NN, KNN: ", make_nn, make_knn),
        (" NN, SVM: ", make_nn, make_svm),
        (" KNN, SVM: ", make_knn, make_svm),
    )
    for label, first, second in comparisons:
        result = combined_ftest_5x2cv(first(),
                                      second(),
                                      inputs_from_feature_selection,
                                      dataset.target,
                                      random_seed=1)
        print(label, result)
Ejemplo n.º 5
0
 def test_predict(self):
     """With preset weights and classification disabled, predict() must
     reproduce the hand-computed forward-pass outputs."""
     net = NeuralNet(2, 3, 2)
     # fix all weights/biases so the forward pass is fully deterministic
     net.W[0] = np.array([[.6, .2, 1], [1, .5, 0]])
     net.W[1] = np.array([[.1, 1], [1, .25], [.1, 1]])
     net.b[0] = np.zeros((1, 3))
     net.b[1] = np.array([[1., -1.]])
     # regression mode: raw network outputs, no argmax/class mapping
     net._classification = False
     inputs = np.array([[1., .4], [0., 0.], [1., 1.], [2., .8]])
     expected = np.array([[1.6, 1.1], [1., 0.], [1.96, 1.775], [2.2, 3.2]])
     actual = net.predict(inputs)
     np.testing.assert_array_almost_equal(expected, actual)
Ejemplo n.º 6
0
 def test_forward_prop(self):
     """One forward-propagation pass with preset weights must store the
     expected per-layer outputs in nn.Z."""
     net = NeuralNet(2, 3, 2)
     # deterministic weights and biases
     net.W[0] = np.array([[.1, .1, .1], [.1, .2, .3]])
     net.W[1] = np.array([[.5, 1], [1, 2], [-2, 3]])
     net.b[0] = np.array([[1.1, 2.1, 3.1]])
     net.b[1] = np.array([[.3, -13]])
     # single forward pass on one sample
     net._forward_prop(np.array([1, 2]))
     # layer 0 holds the input vector unchanged
     np.testing.assert_array_almost_equal(net.Z[0], np.array([[1, 2]]))
     # hidden layer output
     np.testing.assert_array_almost_equal(net.Z[1],
                                          np.array([[1.4, 2.6, 3.8]]))
     # output layer: raw Z_out = [-4, 5]; ReLU clamps to [0, 5]
     np.testing.assert_array_almost_equal(net.Z[2], np.array([[0, 5]]))
Ejemplo n.º 7
0
 def test__constructor(self):
     """NeuralNet(5, 20, 3) must allocate weight, bias, output and delta
     arrays with shapes matching the 5-20-3 topology."""
     net = NeuralNet(5, 20, 3)
     # weight matrices: one per consecutive layer pair
     self.assertEqual(len(net.W), 2)
     for matrix, shape in zip(net.W, [(5, 20), (20, 3)]):
         self.assertEqual(matrix.shape, shape)
     # bias row vectors: one per non-input layer
     self.assertEqual(len(net.b), 2)
     for bias, shape in zip(net.b, [(1, 20), (1, 3)]):
         self.assertEqual(bias.shape, shape)
     # per-layer output vectors, input layer included
     self.assertEqual(len(net.Z), 3)
     for out, shape in zip(net.Z, [(1, 5), (1, 20), (1, 3)]):
         self.assertEqual(out.shape, shape)
     # delta (error) vectors used by back-propagation
     self.assertEqual(len(net.δ), 2)
     for delta, shape in zip(net.δ, [(1, 20), (1, 3)]):
         self.assertEqual(delta.shape, shape)
Ejemplo n.º 8
0
Archivo: main.py Proyecto: mles2/UM
def evaluateNeuralNet(X_train, X_test, y_train, y_test):
    """Build a NeuralNet (max 10000 iterations, per the sibling calls in
    this file) and run it through the shared classifier evaluation.

    Returns whatever evaluate_classifier produces for this model.
    """
    classifier = NeuralNet(10000)
    return evaluate_classifier(classifier, X_train, X_test, y_train, y_test)
Ejemplo n.º 9
0
 def test__nodes_per_layer(self):
     """_nodes_per_layer flattens scalar or list hidden-layer specs into a
     single flat list of node counts."""
     cases = [
         ((5, 20, 3), [5, 20, 3]),
         ((5, [20, 13], 8), [5, 20, 13, 8]),
     ]
     for args, expected in cases:
         np.testing.assert_array_equal(
             NeuralNet._nodes_per_layer(*args), expected)
Ejemplo n.º 10
0
 def test_back_prop(self):
     """One back-propagation step with preset weights/outputs must produce
     the expected deltas, weight updates, and bias updates."""
     nn = NeuralNet(2, 3, 2)
     # -weights-
     nn.W[0] = np.array([[.6, .2, 1], [1, .5, 0]])
     nn.W[1] = np.array([[.1, 1], [1, .25], [.1, 1]])
     nn.b[0] = np.array([[0., 0., 0.]])
     nn.b[1] = np.array([[0., 0.]])
     # -outputs- (as if a forward pass had just run)
     nn.Z[2] = np.array([[.6, 2.1]])
     nn.Z[1] = np.array([[1, .4, 1]])
     nn.Z[0] = np.array([[1, .4]])
     # -learning rate-
     nn.C = 1.
     # do one iteration of back-prop against target y
     y = np.array([[1.1, 2]])
     nn._back_prop(y)
     # check that it computes the correct delta values
     np.testing.assert_array_almost_equal(nn.δ[1], np.array([[.5, -.1]]))
     np.testing.assert_array_almost_equal(nn.δ[0],
                                          np.array([[-.05, .475, -.05]]))
     # check weight updates
     np.testing.assert_array_almost_equal(
         nn.W[1], np.array([[.6, .9], [1.2, .21], [.6, .9]]))
     np.testing.assert_array_almost_equal(
         nn.W[0], np.array([[.55, .675, .95], [.98, .69, -.02]]))
     # check bias weight updates: b starts at zero, so b_new = C * δ
     np.testing.assert_array_almost_equal(nn.b[1], np.array([[.5, -.1]]))
     # BUG FIX: original re-asserted nn.δ[0] here (copy-paste); the bias
     # of the hidden layer, nn.b[0], was never checked.
     np.testing.assert_array_almost_equal(nn.b[0],
                                          np.array([[-.05, .475, -.05]]))
Ejemplo n.º 11
0
                        '-o',
                        help='The output directory to save results to')
    if _args is None:
        return parser.parse_args()
    return parser.parse_args(_args)


if __name__ == '__main__':
    args = parse_args()
    train = get_sets(args.train, args.dir, args.ext, "Training")
    test = get_sets(args.test, args.dir, args.ext, "Test")
    validation = get_sets(args.validation, args.dir, args.ext, "Validation")
    model = NeuralNet(args.num_features,
                      args.num_hidden,
                      args.num_classes,
                      validation_set=validation,
                      multi_vsets=True,
                      max_epochs=args.max_epochs,
                      patience=args.patience,
                      learning_rate=args.learning_rate)
    num_epochs = model.fit(train[0], train[1], True)
    score = model.score(test[0], test[1], multi_sets=True)
    print("accuracy:", score)
    print("epochs:", num_epochs)
    if args.out:
        with open(args.out, 'a') as _f:
            print("Training Sets:",
                  args.train,
                  "Validation Sets:",
                  args.validation,
                  "Test Sets:",
                  args.test,