Example no. 4
    def show_images() -> None:
        random.seed(10)

        dp = DataProvider.load_from_folder(dataset_folder)

        nn = NeuralNet(sizes=[784, 128, 10], epochs=10)
        nn.train(dp.get_train_x(), dp.get_hot_encoded_train_y(),
                 dp.get_test_x(), dp.get_hot_encoded_test_y())

        properly_classified, misclassified = nn.get_properly_classified_and_misclassified_images(
            dp.get_test_x(), dp.get_hot_encoded_test_y())

        print('properly classified')
        for image in properly_classified[:5]:
            plt.imshow(image.reshape(28, 28), cmap=cm.binary)
            plt.show()

        print('misclassified')
        for image in misclassified[:5]:
            plt.imshow(image.reshape(28, 28), cmap=cm.binary)
            plt.show()
Example no. 5
    def __init__(self,
                 batch_size,
                 memory_capacity,
                 num_episodes,
                 learning_rate_drop_frame_limit,
                 target_update_frequency,
                 seeds=[104, 106, 108],
                 discount=0.99,
                 delta=1,
                 model_name=None,
                 visualize=False):

        self.env = CarEnvironment(seed=seeds)
        self.architecture = NeuralNet()
        self.explore_rate = Basic_Explore_Rate()
        self.learning_rate = Basic_Learning_Rate()
        self.model_path = os.path.dirname(
            os.path.realpath(__file__)) + '/models/' + model_name
        self.log_path = self.model_path + '/log'
        self.visualize = visualize
        self.damping_mult = 1

        self.initialize_tf_variables()

        self.target_update_frequency = target_update_frequency
        self.discount = discount
        self.replay_memory = Replay_Memory(memory_capacity, batch_size)
        self.training_metadata = Training_Metadata(
            frame=0,
            frame_limit=learning_rate_drop_frame_limit,
            episode=0,
            num_episodes=num_episodes)

        self.delta = delta
        document_parameters(self)
Example no. 6
 def test_with_wrong_input_size(self):
     self.assertRaises(NeuralNet.BadArchitecture,
                       lambda: NeuralNet(input_size=0))
     self.assertRaises(NeuralNet.BadArchitecture,
                       lambda: NeuralNet(input_size=-1))
     self.assertRaises(NeuralNet.BadArchitecture,
                       lambda: NeuralNet(input_size=-10))
Example no. 7
    def test_6_bools(self):
        X, Y, a, b = get_data_1csv("tests/6bools.csv", 1)

        nn = NN([6, 36, 1], verbose=0, learning_rate=0.1)
        nn.train(X, Y, TRIALS)
        report = nn.get_report(X, Y)
        self.assertEqual(report["errors"], [])
Example no. 8
class Main:
    def __init__(self):
        self.neural_net = NeuralNet()
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters("localhost"))

    def connect(self):
        channel = self.connection.channel()
        channel.queue_declare(queue="neural_net")

        channel.basic_consume(self.callback, queue="neural_net", no_ack=True)

        print("[*] Waiting for messages. To exit press CTRL+C")
        channel.start_consuming()

    def callback(self, channel, method, properties, body):
        print("[x] Received %r" % body)

        parsed_body = json.loads(body)

        if isinstance(parsed_body, list):
            self.neural_net.update_training_set(body)
            self.neural_net.train()
        else:
            request = parsed_body['request']
            result = self.neural_net.predict(request)
            self.send_prediction(result)

    def send_prediction(self, result):
        print("Send %r" % result)
        channel = self.connection.channel()
        channel.queue_declare(queue="results")
        # routing_key must name the queue when publishing via the default exchange
        channel.basic_publish(exchange='', routing_key='results', body=result)
Example no. 9
 def test_3_bool_batch(self):
     X, Y, a, b = get_data_1csv("tests/3bools.csv", 1)
     bs = 100
     nn = NN([3, 10, 1], verbose=0, learning_rate=0.1 / bs)
     nn.train(X, Y, TRIALS, batch_size=bs)
     report = nn.get_report(X, Y)
     self.assertEqual(report["errors"], [])
Example no. 10
 def test_1_hidden_6n_xor(self):
     X = [[0, 0], [0, 1], [1, 0], [1, 1]]
     Y = [[0], [1], [1], [0]]
     nn = NN([2, 6, 1], verbose=0, learning_rate=0.1)
     nn.train(X, Y, TRIALS)
     report = nn.get_report(X, Y)
     self.assertEqual(report["errors"], [])
Example no. 11
    def test_3_outputs_2_bools(self):
        X, Y, a, b = get_data_1csv("tests/3outputs2bools.csv", 1)

        nn = NN([2, 50, 3], verbose=0, learning_rate=0.1)
        nn.train(X, Y, TRIALS)
        report = nn.get_report(X, Y)
        self.assertEqual(report["errors"], [])
Example no. 14
    def __init__(self, model_location, name):
        self._name = name
        self._neural_net = NeuralNet(cached_model=model_location)

        # Run prediction on random data to make sure that code path is executed at least once before the game starts
        random_input_data = np.random.rand(PLANET_MAX_NUM, PER_PLANET_FEATURES)
        predictions = self._neural_net.predict(random_input_data)
        assert len(predictions) == PLANET_MAX_NUM
Example no. 15
 def test_3output_dimensions(self):
     nn = NN((2, 2, 3), verbose=0)
     inputs = ([1, 0], [0, 1], [1, 1], [0, 0])
     for i in inputs:
         nn.forward(i)
         self.assertEqual(nn.outputs[0].shape, (3, 1))
         self.assertEqual(nn.outputs[1].shape, (3, 1))
         self.assertEqual(nn.outputs[2].shape, (3, 1))
         self.assertEqual(nn.get_output().shape, (3, ))
Example no. 16
    def test_weight_shapes(self):
        learning_rate = 0.8
        structure = {'num_inputs': 2, 'num_outputs': 1, 'num_hidden': 5}
        candidate = NeuralNet(structure, learning_rate)

        cand_weights = candidate.get_weights()

        self.assertEqual(cand_weights[0].shape, (3, 5))
        self.assertEqual(cand_weights[1].shape, (5, 1))
Example no. 17
    def example3():
        """
        Neural net with 1 hidden layer 100 epochs test
        """
        dp = DataProvider.load_from_folder(dataset_folder)

        nn = NeuralNet(sizes=[784, 128, 10], epochs=100)
        nn.train(dp.get_train_x(), dp.get_hot_encoded_train_y(),
                 dp.get_test_x(), dp.get_hot_encoded_test_y())
Example no. 20
    def __init__(self, colour):
        self.isPlacing = True
        self.board = self.initialiseBoard()
        self.neuralNet = NeuralNet()

        if colour.upper() == "BLACK":
            self.colour = BLACK
        else:
            self.colour = WHITE
Example no. 21
 def test_1_hidden_2n_xor(self):
     X = [[0, 0], [0, 1], [1, 0], [1, 1]]
     Y = [[0], [1], [1], [0]]
     nn = NN([2, 2, 1],
             verbose=0,
             learning_rate=0.03,
             final_learning_rate=0.001)
     nn.train(X, Y, 30000)
     report = nn.get_report(X, Y)
     self.assertEqual(report["errors"], [])
Example no. 22
 def example5():
     """
     Tests number of neurons in first hidden layer influence on accuracy of classification;
     neural net with 1 hidden layer; 10 epochs
     """
     neurons_number = [196, 128, 98, 64, 32, 16]
     dp = DataProvider.load_from_folder(dataset_folder)
     for n in neurons_number:
         nn = NeuralNet(sizes=[784, n, 10])
         nn.train(dp.get_train_x(), dp.get_hot_encoded_train_y(),
                  dp.get_test_x(), dp.get_hot_encoded_test_y())
Example no. 23
    def test_forward_propagate(self):
        learning_rate = 0.8
        structure = {'num_inputs': 2, 'num_outputs': 1, 'num_hidden': 1}
        candidate = NeuralNet(structure, learning_rate)

        x = np.array([1, 0])
        cand_out = candidate.forward_propagate(x)

        expected_result = .500615025728

        print(cand_out)
        self.assertAlmostEqual(cand_out, expected_result, 4)
Example no. 24
def main():
    with gzip.open('../files/mnist.pkl.gz', 'rb') as f:
        train_set, valid_set, test_set = cPickle.load(f, encoding="bytes")
    nn = NeuralNet([784, 100, 10], [i for i in range(10)],
                   last_activation=softmax,
                   optimize_weights_init=True)
    nn.train_optimized(train_set,
                       learning_rate=0.01,
                       batch_size=100,
                       iterations=200,
                       test_data=valid_set,
                       save=True)
    print(nn.test_accuracy(valid_set))
Example no. 25
def test_networks(filenames, xs, ys):
    from neural_net import NeuralNet

    N, D = xs.shape
    Ny, M = ys.shape
    assert N == Ny

    nn = NeuralNet(input_dim=D, output_dim=M)
    mpes = []
    filenames = sorted(filenames)
    for filename in filenames:
        nn.load(filename)
        mpes.append(mean_percent_error(nn, xs, ys))

    return mpes, filenames
Example no. 26
    def test_metrics() -> None:
        random.seed(10)

        dp = DataProvider.load_from_folder(dataset_folder)

        nn = NeuralNet(sizes=[784, 128, 10], epochs=10)
        nn.train(dp.get_train_x(), dp.get_hot_encoded_train_y(),
                 dp.get_test_x(), dp.get_hot_encoded_test_y())

        scores = nn.compute_metrics(dp.get_test_x(),
                                    dp.get_hot_encoded_test_y())
        print('precyzja_makro: ', scores['precyzja_makro'])    # macro precision
        print('czulosc_makro: ', scores['czulosc_makro'])      # macro recall (sensitivity)
        print('dokladnosc: ', scores['dokladnosc'])            # accuracy
        print('raport: ', scores['raport'])                    # classification report
        print('macierz_bledow: ', scores['macierz_bledow'])    # confusion matrix
Example no. 27
def cross_validation(dataset,
                     percentage_train,
                     iterations,
                     iterations_per_iteration,
                     hidden_layers_sizes,
                     neurons_type='sigmoid',
                     alpha=0.0001,
                     lamb=0.0):
    """
    :param dataset: the full dataset
    :param percentage_train: float, percentage of instances that needs to go to the test partition
    :param iterations: int, number of holdouts to execute
    should call holdout to generate the train and test dicts
    should call train_NN to train the dataset
    should call test_NN to get the performances
    :return: (average_performance, stddev_performance)
    """
    accuracies = []
    precisions = []
    recalls = []
    for it in range(1, iterations + 1):
        #print("Iteration",it)
        train_dataset, test_dataset = holdout(dataset, percentage_train)
        input_size = len(list(train_dataset.keys())[0])
        output_size = len(list(train_dataset.values())[0])
        nn = NeuralNet(input_size, output_size, hidden_layers_sizes,
                       neurons_type, alpha, lamb)
        train_NN(nn, train_dataset, iterations_per_iteration)
        accuracy, precision, recall = test_NN(nn, test_dataset)
        accuracies.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
    return mean(accuracies), stdev(accuracies), mean(precisions), stdev(
        precisions), mean(recalls), stdev(recalls)
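
To make the expected shapes concrete, here is a hypothetical call; the XOR
dataset, the 0.75 split, and the layer sizes below are illustrative
assumptions, not from the source. The dataset is a dict mapping input tuples
to output tuples, which is what the input_size/output_size lookups above imply:

# Hypothetical dataset: input tuples mapped to output tuples (XOR).
dataset = {
    (0, 0): (0,),
    (0, 1): (1,),
    (1, 0): (1,),
    (1, 1): (0,),
}

(mean_acc, stdev_acc, mean_prec, stdev_prec,
 mean_rec, stdev_rec) = cross_validation(dataset,
                                         percentage_train=0.75,
                                         iterations=10,
                                         iterations_per_iteration=1000,
                                         hidden_layers_sizes=[4])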
Example no. 28
    def test_created_net_has_correct_layer_sizes(self):
        nnet = NeuralNet.create_from_file(fname=self.test_file)
        self.assertEqual(nnet.layer_sizes(), self.net_params['layer_sizes'])

        net_params = {
            'layer_sizes': [1, 1, 1],
            'layers': [{
                'weights': [[3]],
                'biases': [5]
            }, {
                'weights': [[1]],
                'biases': [1]
            }]
        }
        self.make_temp_params_file(net_params)
        nnet = NeuralNet.create_from_file(fname=self.test_file)
        # compare against the freshly written params, not the fixture defaults
        self.assertEqual(nnet.layer_sizes(), net_params['layer_sizes'])
Example no. 29
def part_b(file_n):
    f = open(file_n, "r")
    ip = int(f.readline())
    op = int(f.readline())
    batch = int(f.readline())
    n = int(f.readline())
    h = f.readline().rstrip().split(" ")
    h = list(map(int, h))  # list() needed on Python 3, where map is lazy
    h = [ip] + h + [op]
    if f.readline() == "relu\n":
        non_lin = 1
    else:
        non_lin = 0
    if f.readline() == "fixed\n":
        eta = 0
    else:
        eta = 1
    print(ip, op, batch, n)
    print(h)
    print(non_lin, eta)
    start = timeit.default_timer()
    net = NeuralNet(h, bool(non_lin))
    net.grad_des(x[:, 0:-1], x[:, -1], batch, bool(eta))
    stop = timeit.default_timer()
    t_acc = 100 * net.score(x[:, 0:-1], x[:, -1])
    ts_acc = 100 * net.score(tests[:, 0:-1], tests[:, -1])
    print("Train accuracy ", t_acc)
    print("Test accuracy ", ts_acc)
    print("Training time ", (stop - start))
    conf = confusion_matrix(tests[:, -1].tolist(), net.pred(tests[:, 0:-1]))
    plot_confusion(conf, list(set(tests[:, -1].flatten().tolist())),
                   "For layers " + str(h))
Example no. 30
def part_d(eta_a=False, rlu=False):
    tt = np.zeros((5, 2))
    m = 0
    h = [85, 0, 0, 10]
    l = [5, 10, 15, 20, 25]
    for i in l:
        print("For 2 layer ", i, eta_a, rlu)
        h[1] = i
        h[2] = i
        start = timeit.default_timer()
        net = NeuralNet(h, rlu)
        net.grad_des(x[:, 0:-1], x[:, -1], 100, eta_a)
        stop = timeit.default_timer()
        t_acc = 100 * net.score(x[:, 0:-1], x[:, -1])
        ts_acc = 100 * net.score(tests[:, 0:-1], tests[:, -1])
        f_ptr.write("\nFor double layer " + str(eta_a) + str(rlu))
        f_ptr.write(str(i))
        f_ptr.write("\nTraining acc ")
        f_ptr.write(str(t_acc))
        f_ptr.write("\nTesting acc ")
        f_ptr.write(str(ts_acc))
        f_ptr.write("\nTraining time ")
        f_ptr.write(str(stop - start))
        print("Train accuracy ", t_acc)
        print("Test accuracy ", ts_acc)
        print("Training time ", (stop - start))
        tt[m, 0] = t_acc
        tt[m, 1] = ts_acc
        m = m + 1
        conf = confusion_matrix(tests[:, -1].tolist(), net.pred(tests[:, 0:-1]))
        plot_confusion(conf, list(set(tests[:, -1].flatten().tolist())),
                       "For 2 layers " + str(h) + str(eta_a) + str(rlu))
    print(tt)
    plot_metric(tt, l, "For two hidden layers " + str(eta_a) + str(rlu))
Example no. 31
def main():
    parser = argparse.ArgumentParser(description="Halite II training")
    parser.add_argument("--model_name", help="Name of the model")
    parser.add_argument("--minibatch_size", type=int, help="Size of the minibatch", default=100)
    parser.add_argument("--steps", type=int, help="Number of steps in the training", default=100)
    parser.add_argument("--data", help="Data directory or zip file containing uncompressed games")
    parser.add_argument("--cache", help="Location of the model we should continue to train")
    parser.add_argument("--games_limit", type=int, help="Train on up to games_limit games", default=1000)
    parser.add_argument("--seed", type=int, help="Random seed to make the training deterministic")
    parser.add_argument("--bot_to_imitate", help="Name of the bot whose strategy we want to learn")
    parser.add_argument("--dump_features_location", help="Location of hdf file where the features should be stored")

    args = parser.parse_args()

    # Make deterministic if needed
    if args.seed is not None:
        np.random.seed(args.seed)
    nn = NeuralNet(cached_model=args.cache, seed=args.seed)

    if args.data.endswith('.zip'):
        raw_data = fetch_data_zip(args.data, args.games_limit)
    else:
        raw_data = fetch_data_dir(args.data, args.games_limit)

    data_input, data_output = parse(raw_data, args.bot_to_imitate, args.dump_features_location)
    data_size = len(data_input)
    training_input, training_output = data_input[:int(0.85 * data_size)], data_output[:int(0.85 * data_size)]
    validation_input, validation_output = data_input[int(0.85 * data_size):], data_output[int(0.85 * data_size):]

    training_data_size = len(training_input)

    # randomly permute the data
    permutation = np.random.permutation(training_data_size)
    training_input, training_output = training_input[permutation], training_output[permutation]

    print("Initial, cross validation loss: {}".format(nn.compute_loss(validation_input, validation_output)))

    curves = []

    for s in range(args.steps):
        start = (s * args.minibatch_size) % training_data_size
        end = start + args.minibatch_size
        training_loss = nn.fit(training_input[start:end], training_output[start:end])
        if s % 25 == 0 or s == args.steps - 1:
            validation_loss = nn.compute_loss(validation_input, validation_output)
            print("Step: {}, cross validation loss: {}, training_loss: {}".format(s, validation_loss, training_loss))
            curves.append((s, training_loss, validation_loss))

    cf = pd.DataFrame(curves, columns=['step', 'training_loss', 'cv_loss'])
    fig = cf.plot(x='step', y=['training_loss', 'cv_loss']).get_figure()

    # Save the trained model, so it can be used by the bot
    current_directory = os.path.dirname(os.path.abspath(__file__))
    model_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + ".ckpt")
    print("Training finished, serializing model to {}".format(model_path))
    nn.save(model_path)
    print("Model serialized")

    curve_path = os.path.join(current_directory, os.path.pardir, "models", args.model_name + "_training_plot.png")
    fig.savefig(curve_path)
Example no. 32
    def test_created_net_biases(self):
        nnet = NeuralNet.create_from_file(fname=self.test_file)
        biases = nnet.biases()

        expected_biases1 = self.net_params['layers'][0]['biases']
        expected_biases2 = self.net_params['layers'][1]['biases']

        self.assertEqual(biases[0].tolist(), expected_biases1)
        self.assertEqual(biases[1].tolist(), expected_biases2)
Example no. 33
class Cell():
    def __init__(self, listpos, direction, genome):
        self.listpos = listpos
        self.direction = direction  # 0 = NORTH 1 = SOUTH 2 = EAST 3 = WEST
        self.genome = genome
        self.color = genome[0]
        # slice copy: deleting from an alias would also mutate the caller's genome
        self.neurons = self.genome[1:]
        self.net = NeuralNet(self.neurons)
        self.energy = 40000

    def display(self):
        self.x = (self.listpos % columns) * size
        self.y = math.floor(self.listpos / rows) * size
        pygame.draw.rect(screen, self.color, (self.x, self.y, size, size))

    def run(self):
        self.net.neuron_check(self.listpos)
Example no. 34
    def test_created_net_weights(self):
        nnet = NeuralNet.create_from_file(fname=self.test_file)
        weights = nnet.weights()

        expected_weights1 = self.net_params['layers'][0]['weights']
        expected_weights2 = self.net_params['layers'][1]['weights']

        self.assertEqual(weights[0].tolist(), expected_weights1)
        self.assertEqual(weights[1].tolist(), expected_weights2)
Example no. 36
 def __init__(self,
              untrained_net=NeuralNet(1024),
              training_data=Data(TRAINING_DATASET_DIRECTORY),
              validation_data=Data(VALIDATION_DATASET_DIRECTORY)):
     # Note: these defaults are evaluated once, at definition time, and are
     # shared across instances; network_backup also aliases the same object
     # as NN rather than holding an independent copy.
     self.NN = untrained_net
     self.network_backup = untrained_net
     self.training_data = training_data
     self.validation_data = validation_data
     self.m_loss = []
     self.accuracy = []
Example no. 37
    def __init__(self, x_pos, y_pos, ball, game, four_player=False):
        self.bounds = pygame.Rect(x_pos, y_pos, 15, 100)
        self.ball = ball
        self.game = game
        self.four_player = four_player
        # self.net = NeuralNet(4, 1, 3)
        self.net = NeuralNet(3, 1, 3)
        self.generation = 0
        self.score = 0
        self.fitness = 0
        self.contacts_ball = 0
        self.name = self.random_name()
        self.parents = []

        self.colors = None
        self.color_ndx = 0
        self.seizure_reduction = 0
        self.seize_rate = random.uniform(0, 15)
        self.set_colors()
Example no. 38
 def test_backward(self):
     X = [[0, 0], [0, 1], [1, 0], [1, 1]]
     Y = [[0], [1], [1], [0]]
     nn = NN([2, 2, 1], verbose=0)
     for i in range(4):
         nn.forward(X[i])
         nn.backward(Y[i])
         self.assertEqual(nn.outputs[0].shape, (3, 1))
         self.assertEqual(nn.outputs[1].shape, (3, 1))
         self.assertEqual(nn.outputs[2].shape, (1, 1))
         self.assertEqual(nn.get_output().shape, (1,))
Example no. 39
# Choose cost function
cost = "mse"
#cost = "entropy"

# Set plotting parameters
img_file = 'nnet_errorrate_cost.png'
plot_step = 200
pred_epochs = np.arange(0, x_train.shape[0], plot_step)

# Initialize output file for results (text mode: plain strings are written below)
output_file = open("./results/results_" + cost + ".txt", "w")
dash = 25

# Initialize NN classifier
network = NeuralNet(n_inputs, n_hidden, n_outputs, cost=cost, gamma=gamma)

output_file.write(dash * "-" + "\n")
output_file.write("Gamma:" + str(gamma) + "\n")
output_file.write("Cost:" + cost + "\n")
output_file.write(dash * "-" + "\n")

# Fit classifier on training data
start = time.time()
network.fit(x_train, y_train, pred_epochs=pred_epochs, max_epoch=max_epoch)
output_file.write("Training time:" + str(np.around((time.time() - start) / 60., 1)) + "minutes\n")
output_file.write(dash * "-" + "\n")

# Plot training error and error rate

plt.plot(network.pred_errors, '-g', label='Error Rate')
Example no. 40
import time
from neural_net import Connection, Aggregator, NeuralNet

c = Connection('postgresql', 'terriergen', 'avahi-daemon', '172.16.10.67', 5432, 'terriergen')
a = Aggregator()
nn = NeuralNet(a, c)

#nn.crossValidation("out.json") # 82.88%

nn.trainFromFile("out.json")
print "Exporting to file"
nn.exportToFile("nn.trained")
Example no. 41

vectorizer = ImageVectorizer()
paths = vectorizer.get_image_paths()

############################################################
################### Assignment Questions ###################
############################################################

############################################################
# Train a feedforward neural network with one hidden layer #
# of size 3 to learn representations of those digits.      #
# Try using (a) Linear transform function                  #
############################################################ 

net_3lin = NeuralNet(3, 'linear')
net_3lin.train(paths)
weights = net_3lin.input_weights_of_hidden_layer()
vectorizer.vectors_to_images(weights, '3_hidden_layer_linear')

############################################################
# (b) Sigmoid transform function for the hidden layer      #
############################################################

net_3sig = NeuralNet(3, 'sigmoid')
net_3sig.train(paths)
weights = net_3sig.input_weights_of_hidden_layer()
vectorizer.vectors_to_images(weights, '3_hidden_layer_sigmoid')

############################################################
# Change the size of hidden layer to 6 and retrain         #
Example no. 42
class AutoEncoder:
    """
    Autoencoders, also called autoassociators or Diabolo networks are used to learn
    efficient coding of high-dimensional data. They are used to learn the distribution
    of inputs itself.

    Intuitively speaking, they try to find out structures within the data
    to effectively represent it in lower dimensions while maintaining the ability
    to reproduce it. They produce similar results as to PCA with only one layer.

    Autoencoders have structure of neural networks and are fully connected. The number of neurons
    in the output and the input layer are the same, intuitively. Back-propagation can be used
    to learn the weights and biases but given the high depth, gradients reaching the lower layers
    almost vanish, thus making it learn only the averaged out outputs.

    The way to handle it is to first pre-train it as layers of RBMs. Then use the weights and biases
    as the initial weights for the neural networks and then use back-propagation to fine-tune.

    A detailed introduction to autoencoders is available in this paper by Youshua Bengio
    http://www.iro.umontreal.ca/~lisa/pointeurs/TR1312.pdf
    """
    def __init__(self, layer_units):
        num_layers = len(layer_units)
        rbms = []
        for i in range(num_layers - 1):
            rbms.append(RBM(layer_units[i], layer_units[i + 1]))

        self.layer_units = layer_units
        self.num_layers = num_layers
        self.rbms = rbms
        self.ann = NeuralNet(layer_units + list(reversed(layer_units))[1:])

    def train_as_rbm(self, data, max_epoch=50001, threshold=0.005,
                     grad_threshold=0.0001, learning_rate=0.1):
        """
        Pre-training the autoencoder network as stacked RBM.
        Uses contrastive divergence on all layers one-by-one, treating them as RBMs.
        Solves problems of vainishing gradient and local minimas for training as
        neural networks.

        @type   data:           numpy matrix
        @param  data:           matrix containing the set of inputs to train on.
        @type   max_epochs:     int
        @param  max_epochs:     maximum iterations to run
        @type   threshold:      float
        @param  threshold:      threshold for error to stop training
        @type   grad_threshold: float
        @param  grad_threshold: threshold for gradient's norm to stop training
        @type   learning_rate:  float
        @type   learning_rate:  learning rate to use
        @rtype                  None
        """
        layer_units = self.layer_units
        num_layers = self.num_layers
        rbms = self.rbms
        layer_data = data
        for i in range(num_layers - 1):
            layer_rbm = rbms[i]
            layer_rbm.train(layer_data, max_epoch, threshold, grad_threshold,
                            learning_rate)
            layer_data = layer_rbm.run_visible(layer_data)

    def run_rbms(self, data, give_probs=False):
        """
        Runs all the RBMs forward and backward to get outputs.

        @type   data:       numpy matrix
        @param  data:       inputs to train on
        @type   give_probs: bool
        @param  give_probs: whether to run backwards as probability instead of
                            stochastic binary
        @rtype              tuple of list of numpy matrices
        @return             contains all layers' outputs, forward and backward
        """
        layer_units = self.layer_units
        num_layers = self.num_layers

        rbms = self.rbms
        outputs = [data]
        for i in range(num_layers - 1):
            layer_rbm = rbms[i]
            outputs.append(layer_rbm.run_visible(outputs[-1]))

        reverse_outputs = [outputs[-1]]
        for i in range(num_layers - 2, -1, -1):
            layer_rbm = rbms[i]
            hidden_output = layer_rbm.run_hidden(reverse_outputs[-1], give_probs)
            reverse_outputs.append(hidden_output)

        return (outputs, reverse_outputs)

    def rbm_to_neural_net(self):
        """
        Uses the weights from the RBMs to initialize a neural network.
        The weights are mirrored after the middle layer, which is formed
        by the highest-level RBM.
        Forward biases from the RBMs are used in the lower layers
        and backward ones in the higher layers.

        @rtype          None
        """
        ann_weights = []
        ann_biases = []

        for rbm in self.rbms:
            ann_weights.append(rbm.weights[1:, 1:])
            ann_biases.append(rbm.weights[0:1, 1:])

        for rbm in reversed(self.rbms):
            ann_weights.append(numpy.transpose(rbm.weights[1:, 1:]))
            ann_biases.append(numpy.transpose(rbm.weights[1:, 0:1]))

        self.ann.weights = ann_weights
        self.ann.biases = ann_biases

    def train_as_neural_net(self, data, max_epoch=50001, threshold=0.01,
                            grad_threshold=0.0001, learning_rate=0.05,
                            weight_decay=0.001, target_sparsity=0.5,
                            sparsity_weight=0.001):
        """
        Trains the net with back-propagation, treating it as a neural network.
        Generally used after pre-training as RBMs, to avoid local minima and
        vanishing-gradient issues.

        @type   data:               numpy matrix
        @param  data:               inputs to train on
        @type   max_epoch:          int
        @param  max_epoch:          max number of iterations for training
        @type   threshold:          float
        @param  threshold:          error threshold to stop training at
        @type   grad_threshold:     float
        @param  grad_threshold:     threshold for gradient's norm to stop training
        @type   learning_rate:      float
        @param  learning_rate:      learning rate for training
        @type   weight_decay:       float
        @param  weight_decay:       penalty for high weights
        @type   target_sparsity:    float
        @param  target_sparsity:    value for sparsity to be matched
        @type   sparsity_weight:    float
        @param  sparsity_weight:    weight for sparsity term
        @rtype                      None
        """
        self.ann.train(data, data, max_epoch, threshold, grad_threshold,
                       learning_rate, weight_decay,
                       target_sparsity, sparsity_weight)

    def run_neural_net(self, data):
        """
        Runs a forward step treating net as neural net

        @type   data:   numpy matrix
        @param  data:   input to run on
        @rtype          numpy matrix
        @return         output of the net
        """
        return self.ann.forward_step(data)

    def get_encoding(self, data):
        """
        Gets the encoding from the middle layer of net

        @type   data:   numpy matrix
        @param  data:   input to run on
        @rtype          numpy matrix
        @return         encoding vector
        """

        weights = self.ann.weights
        biases = self.ann.biases
        forward = data

        for i in range(self.num_layers - 1):
            weight_sum = numpy.dot(forward, weights[i]) + biases[i]
            forward = scipy.special.expit(weight_sum)

        return forward
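
The docstrings above describe a three-stage workflow: pre-train the stack as
RBMs, mirror the RBM weights into the neural net, then fine-tune with
back-propagation. A minimal usage sketch of that workflow; the toy data and
the [64, 32, 8] layer sizes are illustrative assumptions, not from the source:

import numpy

# Toy data: 200 samples of 64-dimensional inputs in [0, 1].
data = numpy.random.rand(200, 64)

# A 64-32-8 encoder; the constructor mirrors it into a 64-32-8-32-64 net.
ae = AutoEncoder([64, 32, 8])

ae.train_as_rbm(data)          # stage 1: greedy layer-wise RBM pre-training
ae.rbm_to_neural_net()         # stage 2: copy the mirrored weights into the net
ae.train_as_neural_net(data)   # stage 3: fine-tune with back-propagation

codes = ae.get_encoding(data)             # 8-dimensional codes from the middle layer
reconstruction = ae.run_neural_net(data)  # forward pass through the full mirrored net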
Example no. 43
                pass
            PLAYER *= -1
    wins = p1
    losses = p2
    draws = n - (wins + losses)
    # print "one player done"
    return wins, losses, draws


if __name__ == '__main__':
    PLAYER = 1
    p1 = 0
    p2 = 0
    n = 1000
    for i in range(n):
        nn_player = NeuralNet()
        PLAYER = 1
        game = Game()
        while game.check_status() == 10:  # The game continues
            av = game.available_moves()
            # print av
            if PLAYER == -1:
                move = nn_player.forward_pass(game.linear_board())
                # print move
            else:
                move = random.choice(av)
            # print move, "MOVE"
            game.play(move, PLAYER)
            status = game.check_status()
            if status == 1:
                # print "Player 1 has won"
Example no. 44
def main(args):
    if not args["--random"]:
        random.seed(123)
        np.random.seed(123)

    train_csv = args["<train-csv>"]
    if not is_file(train_csv):
        return

    prediction_csv = args["<prediction-csv>"]
    if not is_file(prediction_csv):
        return

    target = args["<target-csv>"]
    if target and not is_file(target):
        return

    try:
        trials = int(args["--trials"])
    except ValueError:
        print_color("Bad value for trials.", COLORS.RED)
        return

    try:
        batch_size = int(args["--batch"])
    except ValueError:
        print_color("Bad value for batch.", COLORS.RED)
        return

    try:
        learn_rate = float(args["--learn-rate"])
    except ValueError:
        print_color("Bad value for learn rate.", COLORS.RED)
        return

    try:
        final_learn_rate = float(args["--final-learn-rate"])
    except ValueError:
        print_color("Bad value for final learn rate.", COLORS.RED)
        return

    try:
        interval = int(args["--timer"])
    except ValueError:
        print_color("Bad value for timer interval.", COLORS.RED)
        return

    try:
        sizes = [int(i) for i in args["--sizes"].split(",")]
    except ValueError:
        print_color("Bad value for sizes.", COLORS.RED)
        return

    try:
        validation_ratio = float(args["--validation-ratio"])
    except ValueError:
        print_color("Bad value for validation ratio.", COLORS.RED)
        return

    print_color("Opening file: %s" % train_csv, COLORS.YELLOW)

    X_train, Y_train, X_valid, Y_valid = get_data_2csv(
        train_csv, prediction_csv, validation_ratio,
        normalize=args["--normalize"])
    if validation_ratio == 1 and args["--validate"]:
        X_valid, Y_valid = X_train, Y_train

    if sizes[0] != len(X_train[0]):
        print_color("Bad 'sizes' parameter for this input data. "
                    "sizes[0]=%s len(X[0])=%s" % (sizes[0], len(X_train[0])),
                    COLORS.RED)
        return

    start_time = time.time()
    print_color("Initializing neural net.", COLORS.GREEN)
    nn = NeuralNet(sizes, learning_rate=learn_rate,
                   final_learning_rate=final_learn_rate,
                   verbose=args["--verbose"], timer_interval=interval,
                   logging=args["--logging"])
    nn.train(X_train, Y_train, trials, batch_size=batch_size)

    report = 0
    if args["--validate"]:
        print_color("Starting validation.", COLORS.GREEN)
        report = nn.show_report(X_train, Y_train, X_valid, Y_valid)
    if args["--report"]:
        if not report:
            report = nn.get_report(X_train, Y_train, X_valid, Y_valid)
        report["validation ratio"] = validation_ratio
        report["normalized"] = args["--normalize"]
        report["random"] = args["--random"]
        report["duration"] = time.time() - start_time
        save_report(report)
    if target:
        raise NotImplementedError  # the two lines below are unreachable placeholders
        print_color("Making predictions.", COLORS.GREEN)
        nn.make_predictions_csv(target)

    print_color("Done after %s seconds." % round(time.time() - start_time, 1),
                COLORS.GREEN)
Example no. 45
            if status == 1:
                # print "Player 1 has won"
                p1 += 1
            elif status == -1:
                # print "Player 2 has won"
                p2 += 1
            elif status == 0:
                # print "Game was a draw"
                pass
            PLAYER *= -1
            board_current = game.linear_board()
            print(board_current[0:3])
            print(board_current[3:6])
            print(board_current[6:9])
    wins = p1
    losses = p2
    draws = n - (wins + losses)
    return wins, losses, draws


if __name__ == '__main__':
    genome_text = open("player_genomes/best_genome.txt", 'r').read()
    data = genome_text.split('\n')[:-1]
    w1, w2, w3, nx, n1, n2, ny, fit = parse_genome(data)
    nn_p = NeuralNet()
    nn_p.load_from_genome(w1, w2, w3, nx, n1, n2, ny, fit)
    print(play_a_game(nn_p, 1))
'''
[X - X]
[X O -]
[O - O]'''
Example no. 46
train_labels = np.load("train_labels.pkl")

# split to obtain train and test set
x_train, x_test, y_train, y_test = train_test_split(train_features, train_labels, test_size=0.33)

# network topology
n_inputs = train_features.shape[1]
n_outputs = 10
n_hiddens_nodes = 200
n_hidden_layers = 1

# specify activation functions per layer
activation_functions = [tanh_function] * n_hidden_layers + [sigmoid_function]

# initialize the neural network
network = NeuralNet(n_inputs, n_outputs, n_hiddens_nodes, n_hidden_layers, activation_functions)

# start training on the training set
learning_rate = 0.001
plot_step = 5000
pred_epochs = np.arange(0, x_train.shape[0], plot_step)
errors = network.train(x_train, y_train, learning_rate, pred_epochs=pred_epochs)
plt.plot(errors)
plt.savefig("nn_errors.png")

# save the trained network
# network.save_pkl_to_file( "trained_configuration.pkl" )

# load a stored network configuration
# network = NeuralNet.load_pkl_from_file( "trained_configuration.pkl" )