Example #1
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvLayer(image_shape=(mini_batch_size, 1, 64, 512),
                      filter_shape=(20, 1, 3, 3),
                      stride=(1, 1),
                      activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 64, 512),
                          filter_shape=(40, 20, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 40, 32, 256),
                          filter_shape=(80, 40, 3, 3),
                          stride=(1, 1),
                          poolsize=(2, 2),
                          activation_fn=relu),
            FullyConnectedLayer(n_in=80 * 16 * 128, n_out=100),
            SoftmaxLayer(n_in=100, n_out=2)
        ], mini_batch_size, 50)

        net.SGD(train_data,
                epochs,
                mini_batch_size,
                0.1,
                validation_data,
                test_data,
                lmbda=0.0)

        nets.append(net)  # Add current network to list
    return nets
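
This example targets a Theano-based network3.py-style Network (a list of layer objects plus the mini-batch size); the ConvLayer class, the stride argument, and the third constructor argument appear to be local extensions. For orientation, a sketch of the SGD signature such code assumes:

# Assumed network3.py-style interface (Nielsen's Theano implementation):
#   net = Network(layers, mini_batch_size)
#   net.SGD(training_data, epochs, mini_batch_size, eta,
#           validation_data, test_data, lmbda=0.0)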
Example #2
def main():
    """use validation set to tune hyper parameters"""
    train, val, test = load_data_wrapper()
    model = Network([784, 60, 10])
    model.SGD(train, 30, 10, 2.5, val)
    print('Evaluation on test: {0} / {1}'.format(model.evaluate(test),
                                                 len(test)))
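
Most of the fully connected examples on this page assume the Network class from Nielsen's network.py. A sketch of the interface they rely on (assumed, not a full implementation):

class Network:
    def __init__(self, sizes):
        """sizes, e.g. [784, 60, 10], lists the neurons per layer."""

    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        """Train by mini-batch stochastic gradient descent; eta is the learning rate."""

    def evaluate(self, test_data):
        """Return the number of test inputs classified correctly."""

    def feedforward(self, a):
        """Return the network's output activation for input a."""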
Example #3
def train(hidden_layers):
    training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
    network = Network([784] + hidden_layers)
    network.SGD(training_data,
                epochs=30,
                learning_rate=3.0,
                mini_batch_size=10,  # mini-batch size must be an integer
                test_data=test_data)
Example #4
def main():
    """main logic"""
    # this load_data_wrapper function is modified version
    train, val, test = load_data_wrapper()

    model = Network([784, 30, 10])
    model.SGD(train, 30, 10, 3, val)
    print('Evaluation on test: {0} / {1}'.format(model.evaluate(test),
                                                 len(test)))
Example #5
def main():
    training_data, validation_data, test_data = load_data_wrapper()
    epoch = 50
    net = Network([784, 40, 10])
    net.SGD(list(training_data), epoch, 20, 4.0, test_data=list(test_data))
    Epoch = np.arange(1, epoch + 1)
    Accuracy = np.array(net.test_result) / 100
    plt.plot(Epoch, Accuracy, '.')
    plt.show()
Example #6
def run_network(training_data, validation_data, test_data):

    # net = Network([784, 30, 10])
    net = Network([784, 10])

    epochs = 30
    mini_batch_size = 10
    eta = 3.0  # learning rate
    net.SGD(training_data, epochs, mini_batch_size, eta, test_data=test_data)

    return 0
Example #7
def main():
    train, val, test = load_data_wrapper()
    # no hidden layer
    print('=' * 20, 'model: no hidden layer', '=' * 20)
    model1 = Network([784, 10])
    model1.SGD(train, 30, 10, 3, val)
    print('Evaluation on test: {0} / {1}'.format(model1.evaluate(test),
                                                 len(test)))

    # small learning rate
    print('=' * 20, 'model: small learning rate', '=' * 20)
    model2 = Network([784, 30, 10])
    model2.SGD(train, 30, 10, 0.1, val)
    print('Evaluation on test: {0} / {1}'.format(model2.evaluate(test),
                                                 len(test)))

    # large learning rate
    print('=' * 20, 'model: large learning rate', '=' * 20)
    model3 = Network([784, 30, 10])
    model3.SGD(train, 30, 10, 30, val)
    print('Evaluation on test: {0} / {1}'.format(model3.evaluate(test),
                                                 len(test)))
Example #8
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                          filter_shape=(20, 1, 5, 5), stride=(1, 1),
                          poolsize=(2, 2), activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 20, 14, 14), 
                          filter_shape=(40, 20, 5, 5), stride=(1, 1),
                          poolsize=(2, 2), activation_fn=relu),
            FullyConnectedLayer(n_in=40*7*7, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
            
        net.SGD(training_data, epochs, mini_batch_size, 0.1,
                validation_data, test_data)
                
        nets.append(net)  # Add current network to list
    return nets
Example #9
class NeuralNet1(Algorithm):
    def __init__(self, **hyperparas):
        self.hyperparas = hyperparas
        sizes = self.hyperparas.get('sizes', [784, 10])
        self.model = Network(sizes)

    def train(self):
        # get hyperparameters
        epochs = self.hyperparas.get('epochs', 30)
        batch_size = self.hyperparas.get('batch_size', 10)
        eta = self.hyperparas.get('eta', 1.0)
        # get data
        cwd = os.getcwd()
        os.chdir(NNDL_PATH)
        training_data, validation_data, test_data = load_data_wrapper()
        os.chdir(cwd)
        return self.model.SGD(training_data, epochs, batch_size, eta,
                              test_data)

    def classify(self, data):
        return self.model.feedforward(data)
Example #10
def basic_conv(n=3, epochs=60):
    nets = []  # list of networks (for ensemble, if desired)
    for j in range(n):
        net = Network([
            ConvPoolLayer(image_shape=(mini_batch_size, 3, 32, 32), 
                          filter_shape=(32, 3, 3, 3), stride=(1, 1),
                          poolsize=(2, 2), activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 32, 16, 16), 
                          filter_shape=(80, 32, 3, 3), stride=(1, 1),
                          poolsize=(2, 2), activation_fn=relu),
            ConvPoolLayer(image_shape=(mini_batch_size, 80, 8, 8), 
                          filter_shape=(128, 80, 3, 3), stride=(1, 1),
                          poolsize=(2, 2), activation_fn=relu),
            FullyConnectedLayer(n_in=128*4*4, n_out=100),
            SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
            
        net.SGD(train_data, epochs, mini_batch_size, 0.01,
                validation_data, test_data)
                
        nets.append(net)  # Add current network to list
    return nets
Example #11
import mnist_loader
from network import Network

training, validation, testing = mnist_loader.load_data_wrapper()
net = Network([784, 25, 10])
res = net.SGD(training, 20, 15, 2.5, testing)
print(res)
Example #12
def main():
    tries = 0
    plays = 0
    db_good = []
    db_bad = []
    
    minesweeper = Minesweeper()
    
    # stores XY of mouse events
    mouse_x = 0
    mouse_y = 0
    
    while True:
        has_game_ended = False
        game_over = False

        minesweeper.new_game()
        
        tries += 1
        print('Game', tries)

        # For random training
        chosen_squares = []
        chosen_squares_length = 1

        # Main game loop
        while not has_game_ended:
            # Initialize variables
            mouse_clicked = False
            safe_squares = []
            flagged_squares = []
            new_position = False # For random training

            # Draw screen
            minesweeper.draw_field()

            # Get Fred's AI input
            if AI_TYPE == 'FRED':
                info = minesweeper.available_info()
                safe_squares, flagged_squares = minesweeper.get_AI_input(info)

            # Get random position
            if AI_TYPE == 'RANDOM':
                while not new_position:
                    choice = [[random.choice(range(FIELDWIDTH)),
                               random.choice(range(FIELDHEIGHT))]]
                    if not minesweeper.revealed_boxes[choice[0][0]][choice[0][1]]:
                        new_position = True
                safe_squares = choice

            # Get player input
            for event in pygame.event.get():
                if event.type == QUIT or (event.type == KEYDOWN and (event.key == K_ESCAPE or event.key == K_q)):
                    minesweeper.terminate()
                elif event.type == MOUSEMOTION:
                    mouse_x, mouse_y = event.pos
                elif event.type == MOUSEBUTTONDOWN:
                    if event.button == LEFT_CLICK:
                        mouse_x, mouse_y = event.pos
                        mouse_clicked = True
                        box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
                        if box_x is not None and box_y is not None:
                            safe_squares = [(box_x, box_y)]
                    if event.button == RIGHT_CLICK:
                        mouse_x, mouse_y = event.pos
                        box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
                        if box_x is not None and box_y is not None:
                            flagged_squares = [(box_x, box_y)]

            # Keeps track of chosen squares
            if len(safe_squares) > 0:
                chosen_squares.append(safe_squares)

            # Checks if game is over for AI
            if AI_TYPE != 'HUMAN':
                if game_over:
                    has_game_ended = True

            # Saves turn
            if TRAINING and chosen_squares_length == len(chosen_squares):
                turn = minesweeper.save_turn()

            # Apply game changes
            if not game_over:
                for x, y in flagged_squares:
                    minesweeper.toggle_flag_box(x, y)

                for x, y in safe_squares:
                    minesweeper._RESET_SURF, minesweeper._RESET_RECT = minesweeper.draw_smiley(WINDOWWIDTH/2, 50, 'check.png')
                    game_over = minesweeper.reveal_box(x, y)

            # Add play to DB
            if TRAINING and chosen_squares_length == len(chosen_squares) and len(safe_squares) > 0:
                chosen_squares_length += 1
                turn_chosen_square = minesweeper.save_chosen_square(safe_squares)
                if minesweeper.mine_field[safe_squares[0][0]][safe_squares[0][1]] != 'X':
                    db_good.append((np.asarray(turn), np.asarray(turn_chosen_square)))
                    #print('GOOD', db_good[-1])
                else:
                    db_bad.append((np.asarray(turn), np.asarray(turn_chosen_square)))
                    #print('BAD', db_bad[-1])
                
                plays += 1            
                if plays > TRAINING_COMPLETE:
                    db_good = minesweeper.data_treat(db_good)
                    db_bad = minesweeper.data_treat(db_bad)
                    print('GOOD', db_good[0])
                    print('BAD', db_bad[0])
                    div = int(len(db_good) * .8)
                    training_data = db_good[:div]
                    test_data = db_good[div:]
                    net = Network([FIELDHEIGHT*FIELDWIDTH, 32, 16, FIELDHEIGHT*FIELDWIDTH])
                    print('Network training started...\n')
                    net.SGD(training_data, 30, 10, 0.1, lmbda=5.0)
                    net.save('ANN_test')

                    print('score = {:.2f} %'.format(net.accuracy(test_data)/100))
                    minesweeper.terminate()

            # Check if reset box is clicked
            if minesweeper._RESET_RECT.collidepoint(mouse_x, mouse_y):
                minesweeper.highlight_button(minesweeper._RESET_RECT)
                if mouse_clicked:
                    minesweeper.new_game()
                    has_game_ended = True

            # Highlight unrevealed box
            box_x, box_y = minesweeper.get_box_at_pixel(mouse_x, mouse_y)
            if box_x is not None and box_y is not None and not minesweeper.revealed_boxes[box_x][box_y]:
                minesweeper.highlight_box(box_x, box_y)

            # Update screen, wait clock tick
            pygame.display.update()
            minesweeper.clock.tick(FPS)
Example #13
#     net = Network([784, 512, 512, 10], training_images[:10000], training_labels[:10000], test_images[:1000], test_labels[:1000], validation_images[:1000], validation_labels[:1000])
#     return net.SGD(20, eta, 128)

# from hyperopt import hp
# space = hp.uniform('eta', 0.0, 0.4)
# from hyperopt import fmin, tpe
# best = fmin(fn=optimize,
#     space=space,
#     algo=tpe.suggest,
#     max_evals=100)
# print(best)

# Train network
net = Network([784, 512, 512, 10], training_images, training_labels,
              test_images, test_labels, validation_images, validation_labels)
net.SGD(35, 0.2, 128, monitor_text=True, optimizing=False)
# Best eta so far: 0.2
# Hyper-opt says: 0.38384960552488084

# Make 5 predictions using trained network for demonstration
images = np.zeros((5, 784))
guesses = []
confidences = []
for k in range(5):
    i = random.randint(0, len(validation_images) - 1)
    images[k] = validation_images[i]
    output = net.feedforward(validation_images[i])
    guess = np.argmax(output)
    confidence = round((output[0][guess] / np.sum(output)) * 100.0, 2)
    guesses.append(guess)
    confidences.append(confidence)
Example #14
test_data = [(x.reshape(x.shape[0], 1), y) for x, y in zip(x_test, y_test)]

if __name__ == "__main__":

    import sys

    from network import Network

    args = sys.argv

    args.pop(0)

    mini_batch_size = int(args[-3])
    epoch = int(args[-2])
    learning_rate = float(args[-1])

    net = Network([784] + list(map(int, args[0:-3])) + [10])
    print(
        f"Network training started with\n"
        f"input layer    = 784 neurons,\n"
        f"output layer   =  10 neurons,\n"
        f"{len(args[0:-3])} hidden layers  = {', '.join(args[0:-3])} neurons\n"
        f"mini batch size = {mini_batch_size}\nepochs = {epoch}\n"
        f"learning rate = {learning_rate}\n")

    # `training_data` is assumed to be prepared earlier in the file,
    # analogously to `test_data` above.
    net.SGD(training_data,
            epoch,
            mini_batch_size,
            learning_rate,
            test_data=test_data)
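
For illustration, a hypothetical invocation of this script (the file name train.py is assumed): every positional argument before the last three is a hidden-layer size, followed by mini-batch size, epochs, and learning rate, matching the parsing above.

# python train.py 30 20 10 30 3.0
# -> hidden layers [30, 20], mini-batch size 10, 30 epochs, learning rate 3.0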
Example #15
from network import Network, cost_function
import numpy as np

nn = Network([3, 2, 2])
nn.biases = [np.array([[-1], [1]]), np.array([[1], [1]])]
nn.weights = [
    np.array([[1, -1, 1], [-1, 1, -1]]),
    np.array([[0.1, -0.5], [0.5, 0.6]])
]

x = np.array([[1], [2], [3]])
y = np.array([[0], [1]])

x1 = np.array([[0.9], [0], [1]])
y1 = np.array([[1], [0]])

t = [(x, y), (x1, y1)]  # training pairs

before = nn.feedforward(x)
print(before, cost_function(nn, t))

nn.SGD(training_data=t, epochs=1000, mini_batch_size=2, eta=1)

after = nn.feedforward(x)
after1 = nn.feedforward(x1)
print(after)
print(after1)
print(cost_function(nn, t))
Example #16
def main():
    """"""
    train, val, test = load_data_wrapper()
    model = Network([784, 60, 10])
    model.SGD(train, 30, 10, 3, val)
    print('Evaluation on test: {0} / {1}'.format(model.evaluate(test), len(test)))
Example #17
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 19 2017
Neural network example
"""
import numpy as np

import mnist_loader
from helpers import png2input
from network import Network

np.random.seed(420)

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
test_data = list(test_data)

nn = Network([784, 30, 10])
nn.SGD(training_data, 30, 10, 2.5, test_data=test_data)
nn.save('data/example.npz')
# nn.load('data/example.npz')

print('score = {:.2f} %'.format(nn.evaluate(test_data)/100))


pix_data = png2input('data/example.png')

results = nn.feedforward(pix_data)
print('You wrote: {}'.format(np.argmax(results)))
for i in range(10):
    print('{}: {:4.1f} %'.format(i, 100*results[i][0]))
Example #18
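This fragment begins mid-function; judging from the calls format_data(*train) and format_data(*test, vector_y=False) further down, the missing opening is presumably along these lines (a hedged reconstruction, assuming 10 MNIST classes and numpy imported as np):

def format_data(xs, ys, vector_y=True):
    if vector_y:
        # one-hot encode the integer labels
        vectorized_y = np.zeros((len(ys), 10), dtype=int)
        for idx, label in enumerate(ys):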
            vectorized_y[idx][label] = 1

        data = []
        for x, y in zip(xs, vectorized_y):
            data.append((x.reshape(-1, 1).astype(float), y.reshape(-1, 1)))
    else:
        data = []
        for x, y in zip(xs, ys):
            data.append((x.reshape(-1, 1).astype(float), y.reshape(-1, 1)))

    return data


train = mnist.train_images()[:-10000], mnist.train_labels()[:-10000]
validation = mnist.train_images()[-10000:], mnist.train_labels()[-10000:]
test = mnist.test_images(), mnist.test_labels()

train = format_data(*train)
validation = format_data(*validation, vector_y=False)
test = format_data(*test, vector_y=False)

# Build the network
net = Network([784, 50, 50, 10])

# Train the network
net.SGD(train, 30, 10, 3, validation)

# Test the network
print("Test data: {:3.2f}% correct".format(100 * net.evaluate(test) /
                                           len(test)))
Example #19
from network import Network
import numpy as np
import mnist_data_loader

training_set, validation_set, test_set = mnist_data_loader.load_data_customized()

net = Network([784, 30, 10])
#print('self.weights shape is ', net.weights[1].shape)

net.SGD(training_set, 30, 10, 3.0, test_set)
Example #20
    e[0][j] = 1.0
    return e


def mse(output, target):
    error = 0.0
    target = vectorized_result(target)
    for i in range(len(output[0])):
        error += pow(abs(output[0][i] - target[0][i]), 2)
    return error / 2
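
Equivalently, the element-wise loop in mse collapses to a single vectorized expression (a sketch assuming numpy is imported as np, as elsewhere in this fragment):

def mse_vectorized(output, target):
    # same result as mse above: half the sum of squared errors
    target = vectorized_result(target)
    return np.sum((output - target) ** 2) / 2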


(training_inputs, training_results, validation_inputs,
 validation_results, test_inputs, test_results) = load_data_wrapper()
n = Network([784, 300, 10])
n.SGD(training_inputs, training_results, 0.3, 5000)

totalerror = 0

for i in range(10000):
    k = np.random.randint(len(validation_inputs))
    result = n.process(validation_inputs[k])
    erro = mse(result, validation_results[k])
    totalerror += erro
    print("Erro = {}".format(erro))
    print("CASE #{}:\n input[{}] = {} \nExpected = {}\n\n".format(
        i, k, result, validation_results[k]))

avgerror = totalerror / 10000
right = 100 - avgerror * 100
print("Taxa de acerto: {}\nErro Médio: {}".format(right, avgerror))
Example #21
import numpy as np
from network import Network

data = np.loadtxt("data.csv", delimiter=",")

test_index = np.random.choice([True, False],
                              len(data),
                              replace=True,
                              p=[0.25, 0.75])
test = data[test_index]
train = data[10:40]
train = [(d[:3][:, np.newaxis], np.eye(3, 1, k=-int(d[-1]))) for d in train]
test = [(d[:3][:, np.newaxis], d[-1]) for d in test]
input_count = 3  # 3 neurons in the input layer
hidden_count = 6  # 6 neurons in the hidden layer
output_count = 3  # 3 neurons in the output layer
nn = Network([input_count, hidden_count, output_count])
print(train[0])
nn.SGD(training_data=train, epochs=1, mini_batch_size=1, eta=1)
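
The np.eye(3, 1, k=-int(d[-1])) expression above builds a one-hot column vector from the class label stored in the last column of each row; for example:

# np.eye(3, 1, k=-label) places the single 1 in row `label`:
#   np.eye(3, 1, k=0)   -> [[1.], [0.], [0.]]   (label 0)
#   np.eye(3, 1, k=-1)  -> [[0.], [1.], [0.]]   (label 1)
#   np.eye(3, 1, k=-2)  -> [[0.], [0.], [1.]]   (label 2)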
Example #22
                
            net = Network([
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 1, rows, cols),
                              filter_shape=(64, 1, 5, 10),
                              poolsize=(2, 2)),
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 64, 11, 79),
                              filter_shape=(64, 64, 4, 10),
                              poolsize=(2, 2)),
                ConvPoolLayer(activation_fn=ReLU,
                              image_shape=(mini_batch_size, 64, 4, 35),
                              filter_shape=(64, 64, 3, 10),
                              poolsize=(2, 2)),
                FullyConnectedLayer(activation_fn=ReLU, n_in=64 * 1 * 13,
                                    n_out=dim1, p_dropout=dropout),
                FullyConnectedLayer(activation_fn=ReLU, n_in=dim1,
                                    n_out=dim2, p_dropout=dropout),
                SoftmaxLayer(n_in=dim2, n_out=2, p_dropout=dropout)],
                mini_batch_size, c_prob)

        net.SGD(train, num_epochs, mini_batch_size, 0.0005,
                validation, test, lmbda=0.1, patience=patience)

        net_best_accuracy.append(net.best_accuracy)
        net_best_roc.append(net.best_auc_roc)
        net_best_precision.append(net.best_precision)
        net_best_recall.append(net.best_recall)
        net_best_f_score.append(net.best_f_score)
        net_best_predictions.append(net.best_predictions)
        net_best_probs.append(net.best_prob)
        save_network(net.best_state, dir)
        
# Record Results

dir = "../data/" + dset + "/data_sets/"    
f = open(dir + "/" + prefix + "results_PopPhy.txt", 'w')
f.write("Mean Accuracy: " + str(np.mean(net_best_accuracy)) + " (" + str(np.std(net_best_accuracy)) + ")\n")
Example #23

import mnist_loader
from network import Network

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = Network([784, 30, 10], random_init=False)

net.SGD(training_data=training_data,
        epochs=30,
        mini_batch_size=10,
        eta=3.0,
        test_data=test_data,
        random_shuffle=True)
Example #24
output_neurons = int(input("Enter number of output neurons "))

#create neural network
neural = Network([input_neurons, hidden_neurons, output_neurons])
neural = Network([1300, 500, 50, 3])  # overrides the network built from the prompts above
#print "neural weights :",neural.weights
#print "neural biases :", neural.biases

#training_data = create_training_data("nn_data.txt")
#testing_data = create_training_data("nn_data.txt")

# 2-n-2 Architecture
#training_data = [(numpy.array([[1],[0]]),numpy.array([[0],[1]])),(numpy.array([[1],[1]]),numpy.array([[1],[0]])),(numpy.array([[0],[0]]),numpy.array([[1],[0]])),(numpy.array([[0],[1]]),numpy.array([[0],[1]]))]
#testing_data = [(numpy.array([[0],[0]]),numpy.int(0)),(numpy.array([[1],[1]]),numpy.int(0)),(numpy.array([[1],[0]]),numpy.int(1)),(numpy.array([[0],[1]]),numpy.int(1))]

#2-n-1 Architecture
#training_data = [(numpy.array([[1],[0]]),numpy.array([[1]])),(numpy.array([[1],[1]]),numpy.array([[0]])),(numpy.array([[0],[0]]),numpy.array([[0]])),(numpy.array([[0],[1]]),numpy.array([[1]]))]
#testing_data = [(numpy.array([[0],[0]]),numpy.int(0)),(numpy.array([[1],[1]]),numpy.int(0)),(numpy.array([[1],[0]]),numpy.int(1)),(numpy.array([[0],[1]]),numpy.int(1))]
'''
from truth_table import xor_data_generator

training_data, testing_data = xor_data_generator(input_neurons,80)
#print training_data
#print testing_data
'''

training_data, testing_data = create_training_data("nn_data.txt", 3, 80)

print(testing_data)
neural.SGD(training_data, 100000, len(training_data), 0.2, testing_data)
Example #25
def main():
    """ Executes the required calculations for an event, prints raw data and creates a graph. """
    args = sys.argv
    global ax1

    if len(args) <= 1 or not args[1].isdigit():
        print("Usage: python train.py NUM_CORES")
        exit()

    num_cores = int(args[1])

    #fig = plt.figure(figsize=(7, 7))
    fig2 = plt.figure(figsize=(7, 7))
    fig3 = plt.figure(figsize=(7, 7))
    #ax1 = fig.add_subplot(111)
    plt.ion()
    network_size = [LightCurve.INPUT_SIZE, 8, 8, LightCurve.OUTPUT_SIZE]
    nn = Network(network_size)
    recent_progress = []
    types = [MicroLensing, NonEvent, Periodic]
    labels = [
        'AC_std', 'AC_max', 'SYM_std', 'SYM_max', 'excursion_diff',
        'excursion_above', 'excursion_below', 'noise', 'slope', 'power_peak',
        'power_mean'
    ]
    labels += ["" for i in range(sum(network_size[1:-1]))]
    labels += [ev().__class__.__name__ for ev in types]

    q = mp.Queue(maxsize=3000)
    pool = mp.Pool(num_cores,
                   initializer=generation_process,
                   initargs=(q, types))
    accuracies = []
    total_gen = 0
    iterations = 0

    rolling_accuracies = [[], [], []]
    batch_size = 1000
    batches_per_save = 30

    while True:
        training_data = [q.get() for _ in range(batch_size)]
        total_gen += len(training_data)
        iterations += 1
        #draw_plot(get_event(types))
        _, _, _, training_accuracy = nn.SGD(np.array(training_data),
                                            10,
                                            250,
                                            0.35,
                                            monitor_training_accuracy=True)

        avg_acc = (sum(training_accuracy) / len(training_accuracy)
                   / len(training_data))
        accuracies.insert(0, avg_acc)
        accuracies = accuracies[:300]
        sys.stdout.flush()

        if iterations % batches_per_save == 0:
            ax2 = fig2.gca()
            draw_neural_net(ax2, .05, .95, .08, .98, nn, None, labels)

            avg_acc_300 = sum(accuracies) / len(accuracies)
            avg_acc_150 = sum(accuracies[:150]) / min(len(accuracies), 150)
            avg_acc_50 = sum(accuracies[:50]) / min(len(accuracies), 50)

            rolling_accuracies[0].append(round(100 * avg_acc_300, 2))
            rolling_accuracies[1].append(round(100 * avg_acc_150, 2))
            rolling_accuracies[2].append(round(100 * avg_acc_50, 2))

            txt = ax2.text(0.5,
                           0.06,
                           "Current accuracy: " +
                           str(round(100 * avg_acc_150, 2)) + "%",
                           color="#000000FF",
                           ha='center',
                           va='center')
            txt = ax2.text(0.5,
                           0.03,
                           "Total lightcurves generated: " +
                           human_format(total_gen),
                           color="#000000FF",
                           ha='center',
                           va='center')
            txt = ax2.text(
                0.5,
                0,
                datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                color="#666666FF",
                ha='center',
                va='center')
            nn.save(datetime.datetime.now().strftime("NN-%Y-%m-%d.json"))

            ax3 = fig3.gca()

            accuracy_x = np.linspace(
                0,
                len(rolling_accuracies[0]) * batch_size * batches_per_save,
                len(rolling_accuracies[0]))

            ax3.plot(accuracy_x,
                     rolling_accuracies[0],
                     color=(0, 0, 1, 1),
                     label='Last 300K')
            ax3.plot(accuracy_x,
                     rolling_accuracies[1],
                     color=(0, 0, 1, 0.5),
                     label='Last 150K')
            ax3.plot(accuracy_x,
                     rolling_accuracies[2],
                     color=(0, 0, 1, 0.25),
                     label='Last 50K')
            ax3.legend()
            ax3.set_xlabel("Lightcurves Processed")
            ax3.set_ylabel("Average Accuracy (%)")
            ax3.set_title("Network Accuracy")

            fig3.savefig('accuracy.png')
            fig3.clf()

            fig2.savefig('nn.png')
            fig2.clf()
Example #26
from network import Network
from cost_function import CrossEntropyCost
from activation_function import Sigmoid
import numpy as np
import mnist_loader

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = Network([784, 30, 10], CrossEntropyCost, Sigmoid)
net.SGD(training_data, 30, 10, 0.1, 5.0, 0.2, test_data)
Example #27

import time

# measure execution time

start = time.time()

#train a new network using test and validation data

net = Network(network.getLayers(20, 0.5), writer, 20, "./network")
net.MBSGD(training_data, validation_data, test_data, 16, 0.1, 0.1, 0.008,
          False)
net.save()
"""
#optimize parameters to a network setup with validation data

optimize(training_data, validation_data, test_data, writer)
"""
"""
#load the network 'network' and train it for 16 epochs, then save it as 'network2'

net = network.load("./network")
net.writer = writer
net.SGD(training_data, validation_data, test_data, 16, 0.1, 0.1, 0.008, False)
net.save("./network2")
"""
"""
#load these networks and do an ensemble classification on the provided data, storing wrong classifications as .csv

ensemble(["./Nets/storedNet1", "./Nets/storedNet2", "./Nets/storedNet3", "./Nets/storedNet4", "./Nets/multitrain1", "./Nets/obstructed_final",
          "./Nets/loweredcontrast_final", "./Nets/blurred_final", "./Nets/obstructed_longTraining", "./Nets/obstructed_final_2",
          "./Nets/contrastlowered_final_2", "./Nets/blurred_final_2"], test_data, test_names)
"""
Example #28

    # 	FullyConnectedLayer(n_in = nFilters*pooledImageSize**2, n_out = gridsize**2)],mini_batch_size)

    #2 Layer FC Net
    # net = Network([
    # 	FullyConnectedLayer(n_in = imagesize**2, n_out = 30),FullyConnectedLayer(n_in = 30, n_out = 9) ], mini_batch_size)

    #1 Layer FC Net
    net = Network([FullyConnectedLayer(n_in=imagesize**2, n_out=gridsize**2)],
                  mini_batch_size)

    #Attempt Stochastic Gradient Descent
    #-----------------------------------
    weights, biases = net.SGD(training_data,
                              nEpochs,
                              mini_batch_size,
                              eta,
                              validation_data,
                              test_data,
                              lmbda=lmb)

    for N, w in enumerate(weights):
        print('Weights for layer %d have shape:' % N)
        print(w.shape)
    for N, b in enumerate(biases):
        print('Biases for layer %d have shape:' % N)
        print(b.shape)

    #Save data
    #---------

    #Save results as a dictionary!
Example #29
# ----------------------
# - network.py example:
import network
from network import Network, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer  # softmax plus log-likelihood cost is more common in modern image classification networks.

# read data:
training_data, validation_data, test_data = network.load_data_shared()
# mini-batch size:
mini_batch_size = 10

from network import ReLU
net = Network([
    ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
                  filter_shape=(20, 1, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    ConvPoolLayer(image_shape=(mini_batch_size, 20, 12, 12),
                  filter_shape=(40, 20, 5, 5),
                  poolsize=(2, 2),
                  activation_fn=ReLU),
    FullyConnectedLayer(n_in=40 * 4 * 4, n_out=100, activation_fn=ReLU),
    SoftmaxLayer(n_in=100, n_out=10)
], mini_batch_size)
net.SGD(training_data,
        60,
        mini_batch_size,
        0.03,
        validation_data,
        test_data,
        lmbda=0.1)
Example #30
from network import Network
import mnist_loader
import matplotlib.cm as cm
import matplotlib.pyplot as plt

training_data, validation_data, test_data = mnist_loader.load_data_wrapper()

net = Network([784, 16, 10])
print("Created network")
net.SGD(training_data, 30, 10, 1.0, test_data=test_data)

raw_pixels, pixel_vector, expected = mnist_loader.load_random_image()


def toNumber(array):
    array = array.reshape(10)
    best = 0  # avoid shadowing the built-in `max`
    for pos in range(1, 10):
        if array[pos] > array[best]:
            best = pos
    return best
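
For reference, the hand-rolled loop above is equivalent to numpy's argmax; a one-line alternative (a sketch, assuming numpy is available; the original script does not import it):

import numpy as np

def to_number_argmax(array):
    # index of the largest activation, same result as toNumber
    return int(np.argmax(array))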


result = net.feedforward(pixel_vector)
print(toNumber(result))
print("Is correct? {}".format(expected == toNumber(result)))

plt.imshow(raw_pixels.reshape((28, 28)), cmap=cm.Greys_r)
plt.show()