Beispiel #1
0
def test_hyperparameters():
    """Grid-search learning rate, hidden-layer size and batch size.

    Trains one network per combination, evaluates RMSE on the dev set,
    logs every result to Results.txt and returns them as a list of
    [learning_rate, hidden_neurons, batch_size, rmse] rows.
    """
    # local import: the file's top-level import block is not visible here
    from itertools import product

    training_data = data_setup.get_training_data()
    dev_data = data_setup.get_dev_data()

    learning_rates = [0.01, 0.1, 0.3, 0.5, 0.7, 0.9]
    hidden_neurons = [2, 4, 6, 8, 10]
    batch_sizes = [128, 256]

    results = []

    # context manager guarantees Results.txt is closed even if training raises
    with open("Results.txt", "w") as f:
        combos = product(learning_rates, hidden_neurons, batch_sizes)
        for counter, (lr, hn, bs) in enumerate(combos, start=1):
            print("Training Combo: " + str(counter))
            wine_net = network.NeuralNetwork(lr, hn)
            wine_net.train(training_data,
                           batch_size=bs,
                           num_epochs=1000)
            preds = wine_net.get_predictions(dev_data)

            # RMSE between the two columns of each prediction pair
            # (presumably predicted vs. target value — confirm in network)
            mse = sum((float(p[0]) - float(p[1])) ** 2 for p in preds)
            rmse = math.sqrt(mse / len(preds))
            results.append([lr, hn, bs, rmse])

            f.write("Learning Rate: " + str(lr) +
                    ", Hidden Neurons: " + str(hn) +
                    ", Batch Sizes: " + str(bs) + "\n")
            f.write("RMSE: " + str(rmse) + "\n\n")
            print("Learning Rate: " + str(lr) +
                  ", Hidden Neurons: " + str(hn) +
                  ", Batch Sizes: " + str(bs) + "\n")
            print("RMSE: " + str(rmse) + "\n\n")

    return results
Beispiel #2
0
def learnNetwork(image="DATA/mask/05_h.jpg",
                 mask="DATA/mask/05_h.tif",
                 mode='create',
                 old_weights='model_w_new_128.h5',
                 new_weights='model_w_new_128.h5',
                 epochs=1,
                 channel=1,
                 gray=False,
                 dim=(128, 128, 1)):
    """Build or resume a network, train it on one image/mask pair, save weights.

    mode 'create' starts from a fresh model; 'learn' resumes from
    old_weights; anything else aborts the script.
    """
    model = net.NeuralNetwork()
    extractor = de.DataExtractor()

    if mode == 'create':
        model.create(dim)
    elif mode == 'learn':
        model.load(dim, old_weights)
    else:
        print('Bad mode. End of script...')
        sys.exit()

    train = extractor.extractData(image,
                                  mask,
                                  channel=channel,
                                  shape=dim,
                                  grey=gray)

    model.learn(train, epoch=epochs)
    model.save_weights(new_weights)

    # drop the large training arrays and the model before collecting,
    # to keep memory low between successive calls
    del train
    del model
    gc.collect()
    def __init__(self, width, height, title):
        """
        Initialize the game window, sprite bookkeeping, the two-layer
        network, and the persisted "best" genome weights.
        """
        super().__init__(width, height, title)
        # Run relative to this file's directory so asset/weight paths resolve
        file_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(file_path)

        # Seconds before a scoreless run is considered timed out — TODO confirm unit
        self.MAX_TIME = 5
        # Sprite lists
        self.wall_list = None
        self.player_list = None
        self.player_tmp  = []
        self.score = 0
        self.score_timeout = 0
        self.generations = 0
        self.frame = 0

        # Set up the player
        self.player_sprite = None
        self.physics_engine = None

        # Network layers: 5 inputs -> 6 hidden -> 4 outputs
        # (the original comment said 6 inputs, but the shapes below are 5x6 / 6x4)
        self.layer1 = rnn.NeuronLayer(5, 6)
        self.layer2 = rnn.NeuronLayer(6, 4)
        self.perceptron = rnn.NeuralNetwork(self.layer1, self.layer2)
        self.genoma_list = []

        # Best genome so far: restore weights/reward from disk when the saved
        # files contain data, otherwise initialize weights uniformly in [-1, 1).
        # NOTE(review): Tool.read presumably returns a sequence whose length <= 1
        # means "nothing saved" — confirm against Tool's implementation.
        self.better           = copybetter()
                                # 6 inputs, 6 neurons, 4 outputs (translated from Portuguese)
        self.better.weights1  = 2 * random.random((5, 6)) - 1 if len(Tool.read('better.weights1.txt')) <= 1 else Tool.read('better.weights1.txt')
        self.better.weights2  = 2 * random.random((6, 4)) - 1 if len(Tool.read('better.weights2.txt')) <= 1 else Tool.read('better.weights2.txt')
        self.better.reward    = 0 if len(Tool.read('better.reward.txt')) <= 1 else Tool.read('better.reward.txt')[0]

        # Neuron/action bookkeeping for the current run
        self.better_count = 0
        self.neuron_action = [0, 0]
        self.index = 0
        self.lines_action = [0, 0, 0, 0]

        self.grid   = Util.grid
        self.reward = Util.reward
def main():
    """Train a digit-recognition network and print some sample predictions."""
    # load features and labels
    X, y = util.load_data()

    # 784 input pixels and 10 output digits are fixed for this problem;
    # the hidden layer sizes (300, 60) are tunable
    nn = network.NeuralNetwork([784, 300, 60, 10])

    # train ('ephocs' is the library's own keyword spelling)
    nn.fit(X, y, ephocs=5)

    # a handful of example predictions; pass show=True to display the digit
    for idx in (7, 3425, 14634, 27345, 38234):
        print('[main]: actual: ',np.argmax(y[idx])," | network predicted: ",nn.predict(X[idx], show=False))

    # predict the kaggle test set and save the submission file
    predict_for_kaggle_test_set(nn=nn, filename="numpy_nn_submission.csv")
Beispiel #5
0
# Create the neural network. The constructor has 5 arguments:
#  - template  Neurons per component, as a list.  Default - [0]
#  - syn_prc   Percentage of synapses in the network, from the default 100%
#              (each neuron connected to each) down to 1% (each neuron has
#              one input and one output). Smaller values make the network
#              faster but increase its error.  Default - 100
#  - co        Learning coefficient (a kind of bias).  Default - 0.7
#  - nmin      The minimum value the network will accept/emit
#              (positive only).  Default - 0
#  - nmax      The maximum value the network will accept/emit
#              (positive only).  Default - 1
nw = network.NeuralNetwork(template=[1, 4, 9, 10, 1],
                           syn_prc=90,
                           co=0.7,
                           nmin=0,
                           nmax=3000)

# Train the network. The 'train' method has these arguments:
#  - filename  Name of the training-data file
#              (must be in the current directory).  Default - 'train.txt'
#  - verbose   If True, shows the training process
#              (percentage, time left, etc.).  Default - False
#  - rep       Repetitions: how many times the network trains on the given
#              file. More repetitions give a better result.  Default - 1
#
# In the training file, one line is one task: the first template[0] numbers
# are the inputs, and the remaining template[-1] numbers are the desired
# outputs.
nw.train(filename="train.txt", verbose=True, rep=10)
from optimizer import Gradientdescent

# 400 training points, 50 held-out test points
N_train, N_test = 400, 50

# one-step-ahead targets: y[t] is x[t+1]
# (x is a sequence defined earlier in the file — not visible here)
x_train, x_test = x[:N_train], x[N_train:N_train + N_test]
y_train, y_test = x[1:N_train + 1], x[1 + N_train:1 + N_train + N_test]

# Alternative layer stacks kept for reference:
#layers = [ nw.RnnLayer(1,5, activation="tanh"),
#         nw.RnnLayer(5,2, activation="tanh"),
#        nw.RnnLayer(2,1, activation=None)]

#layers = [ nw.RnnLayer(1,2, activation=None), nw.TempAffineLayer(2,1) ]
# Single LSTM cell followed by a temporal affine readout
layers = [nw.LSTMLayer(1, 1), nw.TempAffineLayer(1, 1)]

net = nw.NeuralNetwork(layers,
                       Lossl2,
                       optimizer=Gradientdescent(alpha=0.03,
                                                 decay_rate=0.99,
                                                 decay_step=200))
net.train(x_train, y_train, max_iter=1000, print_every=100, batch_size=None)

# inspect the learned recurrent weights
layers[0].print()
print("Wx+Wh=", layers[0].Wx + layers[0].Wh)

x_pre = np.zeros(N_train + N_test)

# predict the whole training range at once, then the test range one step at
# a time, each time feeding the actual previous value of x
x_pre[:N_train] = net.predict((x[:N_train]).reshape(-1)).reshape(-1)
for i in range(N_test):
    x_pre[i + N_train] = net.predict(x[i + N_train - 1]).reshape(-1)

# predictions in red against the one-step-shifted ground truth in black
plt.plot(x_pre, c='red')
plt.plot(x[1:1 + N_train + N_test], c='black')
Beispiel #7
0
@author: Michał
"""
import gc
import numpy as np
import network as net
import DataExtractor as de
from skimage import io

# patch shape: height, width, channels
dim = (8, 8, 1)

# a single channel means grayscale extraction
g = dim[2] == 1

model = net.NeuralNetwork()
extractor = de.DataExtractor()

extracted = extractor.extractData("DATA/mask/01_h.jpg",
                                  "DATA/mask/01_h.tif",
                                  shape=dim,
                                  grey=g)

# reorder sample axes to (samples, channels, height, width) for the network
train = (np.reshape(extracted[0],
                    (extracted[0].shape[0], dim[2], dim[0], dim[1])),
         extracted[1])

# drop the unreshaped copy before training to save memory
del extracted
model.create(size=dim)
model.learn(train, epoch=10)
Beispiel #8
0
def run_timed():
    """Train and test a 784-in/10-out network on 1000 samples, printing accuracy."""
    nn = network.NeuralNetwork(784, 10)
    # print() call for Python 3 consistency with the rest of the file
    # (original used the Python 2 print statement)
    print('accuracy: %s' % train_and_test(nn, 1000))
Beispiel #9
0
def run_default():
    """Train and test a 784-in/10-out network with default settings, printing accuracy."""
    nn = network.NeuralNetwork(784, 10)
    # print() call for Python 3 consistency with the rest of the file
    # (original used the Python 2 print statement)
    print('accuracy: %s' % train_and_test(nn))
Beispiel #10
0
 def test_activation(self):
     """The network's activation function must be the logistic sigmoid."""
     net_under_test = nt.NeuralNetwork(3, 2, 1, 0.5)
     expected = 1 / (1 + np.exp(-0.5))
     self.assertTrue(
         np.all(net_under_test.activation_function(0.5) == expected))
def getLearnRate():
    """Prompt until the user enters a learning rate strictly greater than 0.

    Returns the rate as a float. Non-numeric input re-prompts instead of
    raising ValueError (a crash in the original).
    """
    learnRate = 0.0
    while learnRate <= 0:
        try:
            learnRate = float(input("Enter the learn rate for the model: "))
        except ValueError:
            # not a number: keep prompting
            learnRate = 0.0
    return learnRate


print("Welcome! Let's create and train your neural network to correctly \n \
	classify digits from the MNIST database of images.")

# Build the layer-size list: user-chosen hidden layers sandwiched between the
# fixed 784-pixel input layer and the 10-digit output layer.
layerCount = getLayerCount()
layerSizes = getLayerSizes(layerCount)
layerSizes.insert(0, 784)
layerSizes.append(10)
net = network.NeuralNetwork(layerSizes)

trainingData, validationData, testData = datasetLoader.load_data_wrapper()

# All hyperparameters (epochs, mini-batch size, learn rate) are prompted
# interactively by the helper functions.
net.train(trainingData,
          getEpochs(),
          getMiniBatchSize(),
          getLearnRate(),
          testData=testData)
# Spot-check a single test sample
print(net.predict(testData[204]))

# Round-trip the trained parameters through JSON to verify persistence
net.storeParameters("myParams.json")

loadedNet = network.loadNetwork("myParams.json")
loadedNet.train(trainingData,
import network
import load
from activation import *

train, val, test = load.load_data()


def _fresh_net():
    """Build a 784 -> 100 -> 50 -> 10 network with ReLU/Sigmoid/Tanh layers."""
    return network.NeuralNetwork(
        [28 * 28, 100, 50, 10], [Relu(), Sigmoid(), Tanh()])


my_network = _fresh_net()
my_network.train(train, 0.01, 30, 20, 0, val, "save.pkl")

print("Test accuracy:{0}".format(my_network.eval(test)))

# restore the saved weights into an identically-shaped fresh network
new_network = _fresh_net()
new_network.restore("save.pkl")
Beispiel #13
0
# Empty game board and "neighborhood" mask, both MAX_BOARD x MAX_BOARD
MAX_BOARD = 20
board = [[0] * MAX_BOARD for _ in range(MAX_BOARD)]
isAround = [[False] * MAX_BOARD for _ in range(MAX_BOARD)]
## for debug
# isAround[int(20 / 2) - 1][int(20 / 2) - 1] = True
# isAround[int(20 / 2)][int(20 / 2) + 1] = True
# isAround[5][8] = True
# print(isAround)
isStarted = True
epsilon = 0.05           # exploration rate — TODO confirm usage
gamma = 0.5              # discount factor — TODO confirm usage
firstPlayer = "10"
activePlayer = "10"
win_pattern = "11111"    # five in a row wins
currentEmbed = None
nn = network.NeuralNetwork(input_nodes=218,
                           hidden_nodes=60,
                           output_nodes=1,
                           learning_rate=0.05)


def actionNetwork():
	# given x_t, get action u_t
	resultAction = None
	selectable = []
	# if isStarted == True:

	threatAction, prior = threatMain(copy.deepcopy(board))
	logging.debug("our " + str(threatAction) + "|" + str(prior))
	enemy_threat_action, enemy_prior = threatMain(copy.deepcopy(board), reverse=True)
	# enemy_threat_action, enemy_prior = None, None
	logging.debug("enemy " + str(enemy_threat_action) + "|" + str(enemy_prior))

	skiprandom = False
Beispiel #14
0
import network
import collect
import showImage

# load the dataset splits plus the untouched originals used for display
training_data, validation_data, test_data, training_data_orig = collect.load_data()

print("len training data:", len(training_data[0]))

# visualize a few samples before training
images = showImage.get_images(training_data_orig)
# showImage.plot_images_together(images)
showImage.plot_box_images(images)
showImage.plotDigit(images[0])

# 784 input pixels, one 30-neuron hidden layer, 10 output digits
net = network.NeuralNetwork([784, 30, 10])
net.fit(training_data, validation_data)

        ball = poolTable.balls[i]
        inputs[2 * i] = ball.pos.x
        inputs[2 * i + 1] = ball.pos.y
    return inputs


MAX_SHOTS = 100   # shots each brain gets before its trial ends
TIME_STEP = 0.01  # physics update step — unit presumably seconds, confirm

# shared topology for all candidate brains
setup = network.NeuralNetworkInputs()
setup.numberOfInputs = 32
setup.numHiddenLayer = 8
setup.numberOfOutputs = 2

# ten candidate brains built from the same setup
brains = [network.NeuralNetwork(setup) for _ in range(10)]

for i in range(len(brains)):
    print("Training brain:", i)
    poolTable = pool.PoolTable()

    brain = brains[i]
    shots = 0
    while shots < MAX_SHOTS:
        poolTable.update(TIME_STEP)
        if (poolTable.hasFinished()):
            inputs = getInputs(poolTable)
            outputs = brain.evaluate(inputs)
            power = outputs[0] * 2
            angle = outputs[1] * math.pi * 2
            print("Taking shot", shots, "with", power, "power and", angle,
#!/usr/bin/python3

import network
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

nn = network.NeuralNetwork()
# color training image and the new grayscale photo to colorize
color_source = mpimg.imread('image1900.jpg')
gray_target = mpimg.imread('profile90gray.jpg')
# print(color_source[0])

# train on the color image; the returned value (presumably its grayscale
# version — confirm against network.train) is only used by the commented
# preview below
gray = nn.train(color_source)

# plt.imshow(gray, cmap='gray')
# plt.show()

# colorize the new grayscale photo and display the result
newimg = nn.colorize(gray_target)

plt.imshow(newimg)
plt.show()
Beispiel #17
0
 def test_run(self):
     """run() with the fixture weights must reproduce the known output."""
     net_under_test = nt.NeuralNetwork(3, 2, 1, 0.5)
     # install deterministic fixture weights (copies, so the fixtures survive)
     net_under_test.weights_input_to_hidden = test_w_i_h.copy()
     net_under_test.weights_hidden_to_output = test_w_h_o.copy()
     self.assertTrue(np.allclose(net_under_test.run(inputs), 0.09998924))
Beispiel #18
0
def train_backprop(weights, heights, genders):
    """Train a population of networks by backpropagation plus selection.

    Each iteration, the first network is additionally trained by
    backpropagation (on every sample, or on one random sample when
    RANDOM_SAMPLE is set). The population is then sorted by mean error,
    the best network seeds the next generation, and progress is reported
    via output().

    Returns the best network of the final generation.
    """
    # create the initial population
    generation = [
        network.NeuralNetwork(HIDDEN_LAYERS, HIDDEN_LAYER_NEURONS)
        for _ in range(POPULATION_SIZE)
    ]

    # minimal error starts at 1.0 and only decreases
    minimal_error = 1.0

    for iteration in range(ITERATIONS):

        # mean errors of all networks in the population
        network_errors_mean = calculate_errors(weights, heights, genders,
                                               generation)

        # per-sample errors of the first network, with an optional
        # backpropagation step after each sample
        errors = []
        for sample_i in range(len(weights)):
            errors.append(calculate_sample_error(sample_i, weights, heights,
                                                 genders, generation[0]))
            if not RANDOM_SAMPLE:
                backpropagate(sample_i, genders, generation[0])

        # otherwise take a single random sample for backpropagation
        if RANDOM_SAMPLE:
            backpropagate(random.randrange(0, len(weights)), genders,
                          generation[0])

        network_errors_mean[0] = np.mean(errors)

        backpropagated_network = generation[0]

        # attach mean errors and sort the population best-first
        for member, error in zip(generation, network_errors_mean):
            member.error = error
        generation.sort(key=lambda member: member.error)

        # green when the backpropagated network stayed on top, red otherwise
        if generation[0] == backpropagated_network:
            color = (0, 0.75, 0, 1)
        else:
            color = (1, 0, 0, 1)

        # track the best error seen so far
        minimal_error = min(minimal_error, generation[0].error)

        # breed the next generation from the current best network
        generation = create_generation(generation[0])

        # report progress for this iteration
        output(iteration, minimal_error, color, generation[0])

        if check_stop_conditions(minimal_error, weights):
            break

    print()

    # best network of the final generation
    return generation[0]