Example #1
 def testSaveLoadWeightsFunctionality(self):
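     # Train on XOR-style data, check the outputs, then verify a second network reproduces them after loading the saved weights.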
     nn = neuralnet.NeuralNetwork(2, 3, 2)
     data = [((0, 0), (0, 1)), ((0, 1), (1, 0)), ((1, 0), (1, 0)),
             ((1, 1), (0, 1))]
     for n in range(10):
         nn.train_network(data,
                          iters=15000,
                          change_rate=0.02,
                          momentum=0.001)
     out = nn.evaluate(data[0][0])
     assert out[0] < 0.2 and out[1] > 0.8
     out = nn.evaluate(data[1][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn.evaluate(data[2][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn.evaluate(data[3][0])
     assert out[0] < 0.2 and out[1] > 0.8
     nn.save_weights('./weights.test')
     nn2 = neuralnet.NeuralNetwork(2, 3, 2)
     nn2.load_weights('./weights.test')
     out = nn2.evaluate(data[0][0])
     assert out[0] < 0.2 and out[1] > 0.8
     out = nn2.evaluate(data[1][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn2.evaluate(data[2][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn2.evaluate(data[3][0])
     assert out[0] < 0.2 and out[1] > 0.8
Example #2
 def testSaveLoad(self):
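     # Weights of a freshly initialized network should round-trip through save/load unchanged.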
     nn = neuralnet.NeuralNetwork(2, 3, 2)
     nn.save_weights('./save.test')
     nn2 = neuralnet.NeuralNetwork(2, 3, 2)
     nn2.load_weights('./save.test')
     assert nn.weights_hid_one == nn2.weights_hid_one
     assert nn.weights_hid_two == nn2.weights_hid_two
     assert nn.weights_out == nn2.weights_out
Example #3
def main():
    global wins
    global losses
    global net2_total_right
    global net2_total_wrong
    global net3_total_right
    global net3_total_wrong

    # set number of games and create neural nets
    games = 100000
    two_card_net = nn.NeuralNetwork(3, 12, 2, 0.01)
    three_card_net = nn.NeuralNetwork(4, 16, 2, 0.01)

    # set up players and deck
    random.seed()
    deck = make_deck()
    shuffle(deck)
    player = Player()
    dealer = Dealer()

    # run all games
    for i in range(games):
        # run a single game
        result = play(player, dealer, deck, two_card_net, three_card_net)
        if result == 0:
            losses += 1
        elif result == 1:
            wins += 1

        # reset for next game
        for card in player.hand:
            deck.append(card)
        for card in dealer.hand:
            deck.append(card)
        deck.append(dealer.face_up_card)
        player.clear()
        dealer.clear()
        shuffle(deck)

        # print game number and percent correct choices
        if i % 10000 == 0:
            print("finished game number", i)
            if net2_total_wrong > 0 and net2_total_right > 0:
                two_card_percent = net2_total_right / (net2_total_right +
                                                       net2_total_wrong)
                print("two card net right choice:   %.02f%%" %
                      (two_card_percent * 100))
                print("two card net confidence:     %.04f" %
                      two_card_net.confidence)
            if net3_total_wrong > 0 and net3_total_right > 0:
                three_card_percent = net3_total_right / (net3_total_right +
                                                         net3_total_wrong)
                print("three card net right choice: %.02f%%" %
                      (three_card_percent * 100))
                print("three card net confidence:   %.04f\n" %
                      three_card_net.confidence)
Example #4
    def test_random_network(self):

        nn = neuralnet.NeuralNetwork(self.NETWORK_SHAPE)
        genes = neuralnet.flatten(nn.weights)
        fit = self.get_fitness(genes)

        print("fitness: " + str(fit))
Example #5
 def testEvaluation(self):
     nn = neuralnet.NeuralNetwork(2, 2, 1)
     data = [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]
     for val, out in data:
         ans = nn.evaluate(val)
         for v in ans:
             assert -1 < v < 1
Example #6
def main():
    # Note: Preprocessed data should be in folder preprocessed
    v = processMFCC('instruments_07/banjo/banjo_A3_very-long_forte_normal.wav')
    print('len(input layer) = ' + str(len(v)))
    #raise Exception
    P = Preprocess()
    #P.processData('preprocessed/processed_01.txt',directory='instruments_07',fs_in=8000,length=input_length,q=1,divide=1,comment = 'Instrument Data')
    P.processData('preprocessed/training_02.txt',
                  directory='instr_train_03',
                  way='mfcc',
                  opt=[2048])
    P.loadData('preprocessed/training_02.txt')
    X, Y = P.getXY()
    print('Input Layer Length: ' + str(len(X[0])))
    print('Output Layer Length: ' + str(len(Y[0])))
    input_size = P.getInputLength()
    output_size = P.getOutputLength()
    net = NN.NeuralNetwork([input_size, 100, output_size], 'sigmoid')
    net.storeWeights('weights/instr_03')
    net.loadWeights('weights/instr_03')
    #net.trainWithPlots(X,Y,learning_rate=1,intervals = 100,way='max')

    Q = Preprocess()
    Q.processData('preprocessed/testing_02.txt',
                  directory='instr_test_03',
                  way='mfcc',
                  opt=[2048])
    Q.loadData('preprocessed/testing_02.txt')
    tX, tY = Q.getXY()
    net.testBatch(tX, tY)
Example #7
def best_opt():
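    # Train one model per optimizer and keep whichever finishes with the lowest training loss.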

    optimizer = ['sgd', 'momentum', 'nesterov', 'rmsprop', 'adam', 'nadam']
    histories = dict()
    model = []
    for i, opt in enumerate(optimizer):
        print('Training with ' + opt + ' optimizer:')
        model.append(nn.NeuralNetwork())
        model[i].add(nn.Layer(32, activation='relu'))
        model[i].add(nn.Layer(32, activation='relu'))
        model[i].add(nn.Layer(1, activation='linear'))
        stop = nn.EarlyStopping(patience=5, delta=1e-3, restore_weights=True)
        model[i].compile(epochs=30,
                         learning_rate=1e-3,
                         loss='mse',
                         optimizer=opt,
                         earlystop=stop)
        model[i].fit(x_train, y_train, x_val, y_val, batch_size=32)
        histories[opt] = model[i].history
    min_train_loss = float('inf')
    for i, opt in enumerate(histories.keys()):
        if histories[opt]['train_loss'][-1] < min_train_loss:
            min_train_loss = histories[opt]['train_loss'][-1]
            best_model = model[i]
    print('Best optimizer: ' + best_model.optimizer)
    return histories, best_model
Example #8
def main():

    X, Y = getData()
    X, Y = shuffle(X, Y)
    K = len(np.unique(Y))
    N = len(Y)
    T = np.zeros((N, K))

    for i in range(N):
        T[i, Y[i]] = 1  # one hot encoding for targets

    batch_sz = 500
    learning_rate = [10e-5, 10e-6, 10e-7, 10e-8, 10e-9]
    num_rates = len(learning_rate)
    trainCost = []
    validCost = []
    accValid = []
    accTrain = []
    for i in range(num_rates):
        m = nn.NeuralNetwork(numHiddenLayer=1,
                             numHiddenUnits=200,
                             actFunc="Tanh")
        tc, vc, at, av = m.train(
            X, T, epochs=10000, learning_rate=learning_rate[i], reg=10e-7)
        trainCost.append(tc)
        validCost.append(vc)
        accTrain.append(at)
        accValid.append(av)

    print("Final Train Accuracy {}".format(accTrain))
    print("Final Valid Accuracy {}".format(accValid))
    legend1, = plt.plot(trainCost, label='training error')
    legend2, = plt.plot(validCost, label='validation error')
    plt.legend([legend1, legend2])
    plt.show()
Example #9
 def testDot(self):
     nn = neuralnet.NeuralNetwork(2, 2, 1)
     m1 = [1, 2, 3]
     m2 = [(1, 2), (3, 4), (4, 3)]
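     # dot of a length-3 vector with a 3x2 matrix: [1 + 6 + 12, 2 + 8 + 9] == [19, 19]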
     assert nn._dot(m1, m2) == [19, 19]
     m1 = [1, 2, 3, 4]
     self.assertRaises(ValueError, nn._dot, m1, m2)
Example #10
    def populate(self, total, bestBrain):
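        # Seed the population with fresh random brains, or with copies of the best brain found so far.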

        if (bestBrain is None):
            for i in range(total):
                self.doodler.append(Player.Player(nn.NeuralNetwork(5, 4, 3)))
        else:
            for i in range(total):
                self.doodler.append(Player.Player(bestBrain))

        return self.doodler
Example #11
 def testTanh(self):
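     # tanh saturates toward ±1 for large |x| and is odd: tanh(-x) == -tanh(x).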
     nn = neuralnet.NeuralNetwork(3, 2, 1)
     assert nn._tanh(3) > 0.99
     assert nn._tanh(4) > 0.999
     assert nn._tanh(20) > 0.999
     assert nn._tanh(-3) < -0.99
     assert nn._tanh(-12) < -0.999
     assert nn._tanh(0) == 0
     assert 0.46211 < nn._tanh(0.5) < 0.46212
     assert round(nn._tanh(0.1), 6) == round((-1 * nn._tanh(-0.1)), 6)
Example #12
def test_xor():
    X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    Y = np.array([0, 1, 1, 0])

    m = nn.NeuralNetwork()
    trainCost, validCost = m.train(X, Y)
    pYtrain = m.forward(X, Y)  # pass the training data through the trained network
    pYvalid = m.forward(X, Y)  # only four XOR samples exist, so reuse them for validation

    print("Final Train Accuracy: {}".format(m.accuracy(pYtrain, Y)))
    print("Final Validation Accuracy: {}".format(m.accuracy(pYvalid, Y)))
Example #13
 def testMatrixMaker(self):
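     # _make_matrix(rows, cols) fills with random values in (-1, 1); an optional third argument sets a constant fill.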
     nn = neuralnet.NeuralNetwork(2, 2, 1)
     matrix = nn._make_matrix(2, 3)
     assert len(matrix) == 2
     for row in matrix:
         assert len(row) == 3
     matrix = nn._make_matrix(3, 4, 7)
     assert matrix == [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]]
     matrix = nn._make_matrix(3, 4)
     assert matrix != [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]]
     for row in matrix:
         for item in row:
             assert -1 < item < 1
Example #14
    def test_train(self):
        network = neuralnet.NeuralNetwork(3, 2, 1, 0.5)
        network.weights_input_to_hidden = test_w_i_h.copy()
        network.weights_hidden_to_output = test_w_h_o.copy()
        network.train(inputs, targets)

        self.assertTrue(
            np.allclose(network.weights_hidden_to_output,
                        np.array([[0.37275328], [-0.03172939]])))
        self.assertTrue(
            np.allclose(
                network.weights_input_to_hidden,
                np.array([[0.10562014, -0.20185996], [0.39775194, 0.50074398],
                          [-0.29887597, 0.19962801]])))
Example #15
    def bestOne(self, array):
        max_fitness = 0
        currentBest = Player.Player(nn.NeuralNetwork(5, 4, 3))

        for b in array:
            if (b.fitness >= max_fitness):
                max_fitness = b.fitness  # saves current max fitness
                currentBest = b  # saves current best player

        # if current best from the generation is better than all-time best
        if (currentBest.fitness >= self.bestFitness):
            #print("BEST")
            self.best = currentBest.clone()  # clone the current best player
            self.best.fitness = currentBest.fitness
            self.bestFitness = currentBest.fitness
Example #16
    def __initiate_numpy_neural_net(self):
        N_i = self.train_features.shape[1]
        network = neuralnet.NeuralNetwork(N_i, self.hidden_nodes, self.output_nodes, self.learning_rate)
        losses = {'train':[], 'validation':[]}

        logger.info("Initializing Neural Network Training")
        for ii in range(self.iterations):
            batch = np.random.choice(self.train_features.index, size=128)
            X, y = self.train_features.loc[batch].values, self.train_targets.loc[batch]['cnt']
            network.train(X, y)

            train_loss = neuralnet.MSE(network.run(self.train_features).T, self.train_targets['cnt'].values)
            val_loss = neuralnet.MSE(network.run(self.val_features).T, self.val_targets['cnt'].values)
            if (ii % 50 == 0):
                progress = str("\rProgress: {:2.1f}".format(100 * ii/float(self.iterations)) \
                             + "% ... Training loss: " + str(train_loss)[:5] \
                             + " ... Validation loss: " + str(val_loss)[:5])
                logger.debug(progress)

            losses['train'].append(train_loss)
            losses['validation'].append(val_loss)

        logger.info("Training Complete")
        logger.info("Generating Loss Plot")
        plt.plot(losses['train'], label='Training loss')
        plt.plot(losses['validation'], label='Validation loss')
        plt.legend()
        _ = plt.ylim()
        plt.savefig("assets/loss.png")
        logger.info("Loss Plot Generated")

        logger.info("Generating Prediction Plot")

        fig, ax = plt.subplots(figsize=(8,4))

        mean, std = self.scaled_features['cnt']
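        # Undo the target scaling so predictions are in the original ride-count units.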
        predictions = network.run(self.test_features).T*std + mean
        ax.plot(predictions[0], label='Prediction')
        ax.plot((self.test_targets['cnt']*std + mean).values, label='Data')
        ax.set_xlim(right=len(predictions[0]))
        ax.legend()

        dates = pd.to_datetime(self.rides.loc[self.test_data.index]['dteday'])
        dates = dates.apply(lambda d: d.strftime('%b %d'))
        ax.set_xticks(np.arange(len(dates))[12::24])
        _ = ax.set_xticklabels(dates[12::24], rotation=45)
        plt.savefig("assets/prediction.png")
        logger.info("Prediction Plot Generated")
Example #17
    def get_fitness(self, genes):
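        # Rebuild the layer weight matrices from the flat gene vector, then average game results over NUM_FITNESS_TESTS runs.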
        weights = neuralnet.unflatten(self.NETWORK_SHAPE, genes)
        network = neuralnet.NeuralNetwork(self.NETWORK_SHAPE, weights=weights)

        result = Result()
        for i in range(self.NUM_FITNESS_TESTS):
            result = run_game(network, result)

        for i in range(len(result.hit_log)):
            result.hit_log[i] /= self.NUM_FITNESS_TESTS

        return Fitness(
            result.fails / self.NUM_FITNESS_TESTS,
            result.repeats / self.NUM_FITNESS_TESTS,
            result.tries / self.NUM_FITNESS_TESTS / self.NETWORK_SHAPE[0],
            result.misses / self.NUM_FITNESS_TESTS,
            result.hits / self.NUM_FITNESS_TESTS, result.hit_log)
Example #18
 def testOverall(self):
     nn = neuralnet.NeuralNetwork(2, 3, 2)
     data = [((0, 0), (0, 1)), ((0, 1), (1, 0)), ((1, 0), (1, 0)),
             ((1, 1), (0, 1))]
     for n in range(10):
         nn.train_network(data,
                          iters=15000,
                          change_rate=0.02,
                          momentum=0.01)
     out = nn.evaluate(data[0][0])
     assert out[0] < 0.2 and out[1] > 0.8
     out = nn.evaluate(data[1][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn.evaluate(data[2][0])
     assert out[0] > 0.8 and out[1] < 0.2
     out = nn.evaluate(data[3][0])
     assert out[0] < 0.2 and out[1] > 0.8
Example #19
def main():
    X, Y = getBinaryData()

    X0 = X[Y==0, :]
    X1 = X[Y==1, :]
    X1 = np.repeat(X1, 9, axis=0)
    X = np.vstack([X0, X1])
    Y = np.array([0]*len(X0) + [1]*len(X1))
    X, Y = shuffle(X, Y)
    K = len(np.unique(Y))
    N = len(Y)
    T = np.zeros((N, K))
    for i in range(N):
        T[i, Y[i]] = 1 # one hot encoding for targets

    m = nn.NeuralNetwork(numHiddenLayer=1, numHiddenUnits=100, actFunc="Sigmoid")
    trainCost, validCost, accTrain, accValid = m.train(X, T, epochs=10000,
                                                       learning_rate=5 * 10e-7)

    print("Final Train Accuracy {}".format(accTrain))
    print("Final Valid Accuracy {}".format(accValid))
    legend1, = plt.plot(trainCost, label='training error')
    legend2, = plt.plot(validCost, label='validation error')
    plt.legend([legend1, legend2])
    plt.show()
Example #20
'''
Created on Sep 19, 2012

@author: [email protected]

Example app using the neuralnet package. It will learn the XOR gate.
'''

from __future__ import print_function
import neuralnet
'''
Define the neural net slightly differently. Instead of interpreting a
single output as a 1 or 0, have two outputs, one representing 1, the
second 0.
'''
nn = neuralnet.NeuralNetwork(2, 3, 2)

# Generally you want different data sets for training and testing, but
# we're very limited with XOR.
data_train = [((0, 0), (-1, 1)), ((0, 1), (1, -1)), ((1, 0), (1, -1)),
              ((1, 1), (-1, 1))]

for n in range(5000):
    # There are a handful of optional kw args, but defaults are OK.
    nn.train_network(data_train, iters=5, momentum=0.001, change_rate=0.02)

    for i in range(4):
        out = nn.evaluate(data_train[i][0])
        print('data[{}] -> {}'.format(data_train[i][0], out[0]))

    print('\n******************************\n')
Example #21
 def testEvaluationError(self):
     nn = neuralnet.NeuralNetwork(4, 5, 2)
     values = [1, 2, 3, 4, 5]
     self.assertRaises(ValueError, nn.evaluate, values)
Example #22
 def testOverflow(self):
     nn = neuralnet.NeuralNetwork(2, 2, 1)
     for i in range(10):
         val = decimal.Decimal(random.random())
         tan = nn._tanh(float(val))
         assert -1 < tan < 1
Example #23
 def test_run(self):
     network = neuralnet.NeuralNetwork(3, 2, 1, 0.5)
     network.weights_input_to_hidden = test_w_i_h.copy()
     network.weights_hidden_to_output = test_w_h_o.copy()
     self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
Example #24
    def testLargerNetworks(self):
        ''' LONG RUNNING TEST. Make a list of relatively simple but big data,
        make sure the nnet can learn it. '''
        return  # skipped by default; remove this line to run the full test

        import math
        import datetime
        # Just change these sizes to change the network sizes tested.
        min_size = 180
        max_size = 200
        skip = 9
        # Percentage of success. If it drops below this at any time, the test fails.
        acceptance_rate = 0.8
        failures = []
        times = []

        for size in range(min_size, max_size, skip):
            print('on %d' % size)
            num_out = int(math.sqrt(size))
            nn = neuralnet.NeuralNetwork(size, size, num_out)
            data = []
            for i in range(max_size):
                # Consider the input the index of number 1. i.e. [0,0,0,1] is
                # thought of as 3.
                input_bits = [1 if j == i else 0 for j in range(size)]
                # The target mapping: answer[j] = 1 exactly when the input
                # index equals j*j + j, and -1 otherwise.
                answer = [
                    1 if input_bits[j * j + j] == 1 else -1
                    for j in range(num_out)
                ]
                data.append((input_bits, answer))

            # Training
            start_time = datetime.datetime.now()
            for n in range(5):
                nn.train_network(data,
                                 iters=10000,
                                 change_rate=0.02,
                                 momentum=0.01)
            t = (datetime.datetime.now() - start_time).total_seconds()
            times.append(t)

            # Testing
            # Var to count number of failures for each size
            num_failures = 0
            for sample in data:
                res = nn.evaluate(sample[0])
                res = [round(x) for x in res]
                #self.assertEqual(res, sample[1], 'Fail with size %d' % size)
                if res != sample[1]:
                    # The network got it wrong
                    num_failures += 1
            print('%d wrong' % num_failures)
            win_percent = float(max_size - num_failures) / float(max_size)
            print('%f percent right' % (win_percent * 100))
            assert win_percent >= acceptance_rate
            failures.append(num_failures)

        visual_desired = False
        if visual_desired:
            # Plot it so I can look at something nice in a couple hours.
            try:
                import matplotlib.pyplot as lab
            except ImportError:
                # Install matplotlib to see the plot.
                pass
            else:
                x_data = range(min_size, max_size, skip)
                lab.title(
                    'Errors & Time vs Network Size -- Constant Iterations')
                lab.xlabel('Network Size (# of input and hidden neurons)')
                lab.ylabel('Errors (on data of size %d) and Time (seconds)' %
                           max_size)
                lab.plot(x_data, failures, color='r', label='Failures')
                lab.plot(x_data, times, color='b', label='Time (seconds)')
                lab.legend()
                lab.show()
Example #25
 def test_activation(self):
     network = neuralnet.NeuralNetwork(3, 2, 1, 0.5)
     self.assertTrue(
         np.all(network.activation_function(0.5) == 1 / (1 + np.exp(-0.5))))
Example #26
def main():
    """Main program execution"""

    # Dataset Loader
    dset_loader = None

    # Create the Dataset Loader if possible
    try:
        # Configure dataset loading and subdivision ==> USER
        validation_percent = 0
        limit_images = 0
        dset_loader = LoaderMnist("./mnist/train-images-idx3-ubyte",
                                  "./mnist/train-labels-idx1-ubyte",
                                  "./mnist/t10k-images-idx3-ubyte",
                                  "./mnist/t10k-labels-idx1-ubyte",
                                  validation_percent, limit_images)

    except RuntimeError as exception:
        print(exception)
        raise SystemExit

    # Create the Neural Network
    nnetwork = None
    try:
        # Create the network with its structure ==> USER
        nnetwork = nn.NeuralNetwork([784, 400, 256, 10])

    except RuntimeError as exception:
        print(exception)
        raise SystemExit

    # Configuration of the Network Hyperparameters ==> USER
    train_batch_size = 128
    test_batch_size = 128
    learning_rate = 0.005
    learning_rate_decay_steps = 100
    learning_rate_decay_amount = 0.98
    min_epochs_without_progress = 10

    # Apply the configuration
    nnetwork.ConfigureTraining(learning_rate, learning_rate_decay_steps,
                               learning_rate_decay_amount,
                               min_epochs_without_progress)

    # Start the tensorflow session
    nnetwork.StartTraining()

    # Start epochs training
    while nnetwork.CheckTrainingEnd() is False:

        # Start the current Epoch
        nnetwork.EpochStart()

        # Train batches
        batch_completed = False
        while not batch_completed:
            # Execute a training step on the batch
            batch_completed, cur_batch = dset_loader.GetBatch(train_batch_size)
            nnetwork.RunTrainingStep(cur_batch)

        # Prediction of validation set
        if len(dset_loader.validation_set) > 0:
            nnetwork.AddTestResults(dset_loader.train_set,
                                    dset_loader.validation_set,
                                    test_batch_size)
        else:
            nnetwork.AddTestResults(dset_loader.train_set,
                                    dset_loader.test_set, test_batch_size)

        # Advance to next Epoch
        nnetwork.EpochEnd()

    # Close the Training Session
    nnetwork.StopTraining()

    # Display the training results
    nnetwork.DisplayResults()
    nnetwork.DisplayWeights(nnetwork.best_weights[0],
                            _width=28,
                            _height=28,
                            _plot_rows=20,
                            _plot_cols=20)
    nnetwork.DisplayWeights(nnetwork.best_weights[1],
                            _width=20,
                            _height=20,
                            _plot_rows=16,
                            _plot_cols=16)
    nnetwork.DisplayWeights(nnetwork.best_weights[2],
                            _width=16,
                            _height=16,
                            _plot_rows=2,
                            _plot_cols=5)

    return
Example #27
def main():
    training_data_file_path = 'dataset/sonar.arff'
    data, meta = arff.loadarff(training_data_file_path)
    data = np.asarray(data.tolist())

    # Part B (1)
    print('Epochs curve')
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=73)

    epochs = [25, 50, 75, 100]
    accuracy = []
    for epoch in epochs:
        predicted_confidences = np.zeros(len(data))
        fold_numbers = np.zeros(len(data), dtype=int)
        curr_fold_num = 0
        for train_indexes, test_indexes in skf.split(
                data[:, :-1].astype(float), data[:, -1]):
            fold_numbers[test_indexes] = curr_fold_num
            curr_fold_num += 1

            X_train, X_test = data[train_indexes, :-1].astype(float), data[
                test_indexes, :-1].astype(float)
            y_train, y_test = data[train_indexes, -1], data[test_indexes, -1]
            neural_net = neuralnet.NeuralNetwork(
                len(meta.names()) - 1,
                len(meta.names()) - 1, 1, 0.1, epoch)
            y_train = np.array(y_train, ndmin=2).T
            for _ in range(neural_net.epoch):
                train_data = np.concatenate((X_train, y_train), axis=1)
                np.random.shuffle(train_data)
                for i in range(len(train_data)):
                    neural_net.train(
                        train_data[i, :-1].astype(float),
                        [0.0] if train_data[i, -1] == 'Rock' else [1.0])
            # fold_accuracy = neural_net.test_neural_net(data, test_indexes, predicted_confidences)
            neural_net.test_neural_net(data, test_indexes,
                                       predicted_confidences)
            # print('Fold accuracy:', fold_accuracy)

        correct_preds = 0
        for i in range(len(data)):
            labels = meta[meta.names()[-1]][1]
            predicted_label = labels[0] if predicted_confidences[i] < 0.5 else labels[1]
            actual_label = data[i, -1]
            if predicted_label == actual_label:
                correct_preds += 1
        # print('(', epoch, ',', correct_preds * 1.0 / len(data), ')')
        accuracy.append(correct_preds * 1.0 / len(data))
    plot_curve(epochs, accuracy, 'Epoch', 'Accuracy', 'Accuracy vs Epoch')

    # Part B (2)
    print('Folds curve')
    num_folds = [5, 10, 15, 20, 25]
    accuracy = []
    for folds in num_folds:
        skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=73)
        predicted_confidences = np.zeros(len(data))
        fold_numbers = np.zeros(len(data), dtype=int)
        curr_fold_num = 0
        for train_indexes, test_indexes in skf.split(
                data[:, :-1].astype(float), data[:, -1]):
            fold_numbers[test_indexes] = curr_fold_num
            curr_fold_num += 1

            X_train, X_test = data[train_indexes, :-1].astype(float), data[
                test_indexes, :-1].astype(float)
            y_train, y_test = data[train_indexes, -1], data[test_indexes, -1]
            neural_net = neuralnet.NeuralNetwork(
                len(meta.names()) - 1,
                len(meta.names()) - 1, 1, 0.1, 50)
            y_train = np.array(y_train, ndmin=2).T
            for _ in range(neural_net.epoch):
                train_data = np.concatenate((X_train, y_train), axis=1)
                np.random.shuffle(train_data)
                for i in range(len(train_data)):
                    neural_net.train(
                        train_data[i, :-1].astype(float),
                        [0.0] if train_data[i, -1] == 'Rock' else [1.0])
            # fold_accuracy = neural_net.test_neural_net(data, test_indexes, predicted_confidences)
            neural_net.test_neural_net(data, test_indexes,
                                       predicted_confidences)
            # print('Fold accuracy:', fold_accuracy)

        correct_preds = 0

        for i in range(len(data)):
            labels = meta[meta.names()[-1]][1]
            predicted_label = labels[0] if predicted_confidences[i] < 0.5 else labels[1]
            actual_label = data[i, -1]
            if predicted_label == actual_label:
                correct_preds += 1
        # print('(', folds, ',', correct_preds * 1.0 / len(data), ')')
        accuracy.append(correct_preds * 1.0 / len(data))
    plot_curve(num_folds, accuracy, 'Number of folds', 'Accuracy',
               'Accuracy vs Number of folds')

    # Part B (3)
    print('ROC curve')
    skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=59)

    predicted_confidences = np.zeros(len(data))
    fold_numbers = np.zeros(len(data), dtype=int)
    curr_fold_num = 0
    for train_indexes, test_indexes in skf.split(data[:, :-1].astype(float),
                                                 data[:, -1]):
        fold_numbers[test_indexes] = curr_fold_num
        curr_fold_num += 1

        X_train, X_test = data[train_indexes, :-1].astype(float), data[
            test_indexes, :-1].astype(float)
        y_train, y_test = data[train_indexes, -1], data[test_indexes, -1]
        neural_net = neuralnet.NeuralNetwork(
            len(meta.names()) - 1,
            len(meta.names()) - 1, 1, 0.1, 50)
        y_train = np.array(y_train, ndmin=2).T
        for _ in range(neural_net.epoch):
            train_data = np.concatenate((X_train, y_train), axis=1)
            np.random.shuffle(train_data)
            for i in range(len(train_data)):
                neural_net.train(train_data[i, :-1].astype(float),
                                 [0.0] if train_data[i, -1] == 'Rock' else [1.0])
        # fold_accuracy = neural_net.test_neural_net(data, test_indexes, predicted_confidences)
        neural_net.test_neural_net(data, test_indexes, predicted_confidences)
        # print('Fold accuracy:', fold_accuracy)

    roc_input = []
    for i in range(len(data)):
        labels = meta[meta.names()[-1]][1]
        predicted_label = labels[0] if predicted_confidences[i] < 0.5 else labels[1]
        actual_label = data[i, -1]
        # print(fold_numbers[i], predicted_label, actual_label, predicted_confidences[i])
        roc_input.append((predicted_confidences[i], actual_label))

    # Sort in decreasing order of positive confidence
    x_fpr_vals = []
    y_tpr_vals = []
    roc_input.sort(reverse=True)
    num_neg = len([val for val in roc_input if val[1] == 'Rock'])
    num_pos = len([val for val in roc_input if val[1] == 'Mine'])
    tp = fp = last_tp = 0
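    # Lower the threshold through the sorted confidences; emit an (FPR, TPR) point each time the confidence value changes at a negative example after new true positives.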
    for i in range(len(roc_input)):
        if (i > 0 and roc_input[i][0] != roc_input[i - 1][0]
                and roc_input[i][1] == 'Rock' and tp > last_tp):
            x_fpr_vals.append(float(fp) / num_neg)
            y_tpr_vals.append(float(tp) / num_pos)
            last_tp = tp
        if roc_input[i][1] == 'Mine':
            tp += 1
        elif roc_input[i][1] == 'Rock':
            fp += 1
    # print('(', fp * 1.0 / num_neg, ',', tp * 1.0 / num_pos, ')')
    x_fpr_vals.append(float(fp) / num_neg)
    y_tpr_vals.append(float(tp) / num_pos)

    plot_curve(x_fpr_vals, y_tpr_vals, 'False Positive Rate',
               'True Positive Rate', 'ROC Curve')
Example #28
 def testTraining(self):
     # This test really just tests for crashes
     nn = neuralnet.NeuralNetwork(2, 2, 1)
     data = [((0, 0), 0), ((0, 1), 1), ((1, 0), 1), ((1, 1), 0)]
     nn.train_network(data)
Example #29
def one_hot(Y):
    # Convert integer class labels into 10-dimensional one-hot vectors.
    new_Y = []
    for label in Y:
        encoding = np.zeros(10)
        encoding[label] = 1.
        new_Y.append(encoding)
    return np.array(new_Y)


# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Flatten input
x_train = x_train.reshape(x_train.shape[0], -1)
x_test = x_test.reshape(x_test.shape[0], -1)

# Normalize data
x_train = np.array(x_train / 255.)
x_test = np.array(x_test / 255.)

# Convert targets to one-hot encoding
y_train = one_hot(y_train)
y_test = one_hot(y_test)

model = nn.NeuralNetwork()
model.add(nn.Layer(64, activation='leakyrelu'))
model.add(nn.Layer(32, activation='tanh'))
model.add(nn.Layer(10, activation='softmax'))
stop = nn.EarlyStopping(patience=5, delta=0.1, restore_weights=True)
model.compile(epochs=10,
              learning_rate=1e-3,
              loss='categorical_cross_entropy',
              optimizer='nesterov',
              earlystop=stop)
model.fit(x_train, y_train, x_test, y_test, batch_size=64)
Example #30
 def __init__(self):
     self.best = Player.Player(nn.NeuralNetwork(5, 4, 3))
     self.doodler = []
     self.bestFitness = 0