Example no. 1
def compare_to_ann():
    """
    Regression problem on two functions (sin, square) with added noise,
    using batch learning with a two-layer neural network.
    """

    epochs = 5000
    hidden_neurons = 63  # same number as RBF nodes
    output_neurons = 1

    sin, square = generate_data.sin_square(verbose=verbose)
    sin = add_noise_to_data(sin)
    square = add_noise_to_data(square)

    data = square  # use which dataset to train and test

    batch_size = data.train_X.shape[0]  # set batch size equal to the number of data points

    ann = ANN(epochs, batch_size, hidden_neurons, output_neurons)

    y_pred = ann.solve(data.train_X, data.train_Y, data.test_X, data.test_Y)

    error = 0.
    for i in range(data.test_Y.shape[0]):
        error += np.abs(data.test_Y[i] - y_pred[i])

    test_error = error / data.test_Y.shape[0]

    print('Test error: ', test_error)

    plotter.plot_2d_function(data.test_X, data.test_Y, y_pred=y_pred)
Example no. 2
def evaluate(ind):
    ann = ANN(ind)
    error = 0.0
    for i in range(0, inputs.shape[0]):
        out = ann.evaluate(inputs[i])
        error += ((out[0] - outputs[i][0]) ** 2) + ((out[1] - outputs[i][1]) ** 2)
    return error,
Example no. 3
def train(text, continue__ = 0):
    global data_size, data_len, char_size

    data = sample_data(text, sample_num = data_size, data_len = data_len)
    print data[0].shape
    print data[1].shape
    l = int(data_size * 0.7)
    datasets = [(shared(data[0][:l]), shared(data[1][:l])),
                (shared(data[0][l:]), shared(data[1][l:]))]

#ann
    theano.config.exception_verbosity='high'
    theano.config.on_unused_input='ignore'

    if not continue__:
        cl = ANN(data_len * char_size, char_size, hiddens = [100, 100], lmbd = 0)
        cl.fit(datasets, lr = theano.tensor.cast(2, theano.config.floatX), n_epochs = 200, batch_size = 200)

        dump(cl, open('save.dat', 'wb'))
    else:
        try:
            os.rename('save.dat', 'origin.dat')
        except:
            pass
        cl = load(open('origin.dat','rb'))
        print cl
        cl.fit(datasets, lr = theano.tensor.cast(1, theano.config.floatX), n_epochs = 100, batch_size = 200)

        dump(cl, open('save.dat', 'wb'))
Example no. 4
    def __init__(self,
                 D_hidden_dim,
                 G_hidden_dim,
                 z_dim,
                 hyperparams={},
                 dataset='mnist',
                 image_dim=None):

        self.dataset = dataset
        if dataset is None:
            # no dataset given: fall back to the caller-supplied image dimension
            if image_dim is None:
                raise RuntimeError("You must either define a recognised dataset "
                                   "or define an input image dimension")
        elif dataset.lower() == 'mnist':
            image_dim = 28 * 28
            self.digit = hyperparams.get("digit", 2)
        elif dataset.lower() == 'celeba_bw':
            print("This basic GAN version probably will not converge. "
                  "See TTitcombe/GANmodels for more powerful versions "
                  "(in development)")
            image_dim = 178 * 218
        else:
            raise NotImplementedError("The dataset you have selected "
                                      "is not recognised")

        self.epochs = hyperparams.get("epochs", 100)
        self.batchSize = hyperparams.get("batchSize", 64)
        self.lr = hyperparams.get("lr", 0.001)
        self.decay = hyperparams.get("decay", 1.)
        self.epsilon = hyperparams.get("epsilon", 1e-7)  #avoid overflow

        self.D = ANN(image_dim, D_hidden_dim, 1, self.lr, False)
        self.G = ANN(z_dim, G_hidden_dim, image_dim, self.lr, True)
Example no. 5
def train_sgd():
    data = load(FNAME)

    global MIN, MAX
    MIN = data.min(axis=0)[:-3]
    MAX = data.max(axis=0)[:-3]
    MIN[MIN==MAX] = 0.

    div = (MAX - MIN)
    div[div==0.] = 0.00000001
    data[:,:-3] /= div


    global TRAIN_POS, TRAIN_NEG
    TRAIN_POS = (data[:,-3] == 1).sum()
    TRAIN_NEG = (data[:,-3] == 0).sum()


    loss = "log"
    #loss = "modified_huber"
#    c_t0 = SGDClassifier(loss, n_iter=5)
#    c_t1 = SGDClassifier(loss, n_iter=5)
#    c_t2 = SGDClassifier(loss, n_iter=5)

    c_t0 = ANN([data.shape[1] - 3, 2000, 1])
#    c_t1 = ANN([data.shape[1] - 3, 100, 2])
#    c_t2 = ANN([data.shape[1] - 3, 100, 2])


    N = data.shape[0]
    indices = np.array(range(N))
    pos = data[:,-3] == 1
    neg = ~pos

    pos_ii = indices[pos]
    neg_ii = indices[neg]


    for e in range(EPOCHES):
        np.random.shuffle(pos_ii)
        np.random.shuffle(neg_ii)
        for_train = np.concatenate((pos_ii[:MINI_BATCH_SIZE], neg_ii[:MINI_BATCH_SIZE+1]))

#        y0 = np.array([1. - data[for_train,-3], data[for_train,-3]], dtype=np.float64)
#        y1 = np.array([1. - data[for_train,-2], data[for_train,-2]], dtype=np.float64)
#        y2 = np.array([1. - data[for_train,-1], data[for_train,-1]], dtype=np.float64)

        y0 = data[for_train,-3]
#        y1 = data[for_train,-2]
#        y2 = data[for_train,-1]

        c_t0.partial_fit(data[for_train,:-3], y0, [0, 1])
#        c_t1.partial_fit(data[for_train,:-3], y1, [0, 1])
#        c_t2.partial_fit(data[for_train,:-3], y2, [0, 1])

        print "Epoch %d out of %d  done" % (e, EPOCHES)

    data = None

    return c_t0, None, None  #, c_t1, c_t2
Example no. 6
def analyzeSymbol(symbol):
    startTime = time.time()
    flag = 0
    trainingData = getTrainingData(symbol)

    network = ANN(inNode=3, hiddenNode=3, outNode=1)

    network.training(trainingData)

    # get rolling data for most recent day

    #network.training(trainingData)
    for i in range(0, 5):
        # get rolling data for most recent day
        predictionData = getPredictionData(symbol, flag)
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                               predictionData[2])

        print predictedStockPrice
        flag += 1
        global new_value
        new_value = predictedStockPrice

    return predictedStockPrice
Example no. 7
 def __init__(self, input_size, num_hidden_layers, hidden_layer_sizes,
             output_size, epochs=50, batch_size=1, fit_verbose=2,
             variables=None, weight_file=''):
     super().__init__()
     self.weight_file = weight_file
     self.model = ANN(input_size, num_hidden_layers, hidden_layer_sizes,
                     output_size, epochs=epochs, batch_size=batch_size,
                     fit_verbose=fit_verbose, variables=variables)
Example no. 8
def test(ann, images, img_size, S, F, neurons_num):

    ww, bb = ann.get_weights()
    ss = ann.ss[: len(ann.ss)/2 + 1]
    flt = ANN(ss, .0)
    ww_size = 0
    bb_size = 0
    for l in range(1, len(ss)):
        ww_size += ss[l-1] * ss[l]
        bb_size += ss[l]
    flt.set_weights(ww[:ww_size], bb[:bb_size])


    N = images.shape[0]
    ii = range(N)
    np.random.shuffle(ii)
    ii = ii[:10]

    to_show = None
    for i in ii:
        tmp = images[i].copy()
        tmp = tmp.reshape((img_size,img_size))


        flt_img = np.zeros((neurons_num,neurons_num))

        r = 0
        c = 0
        for patch in patches(tmp, img_size, S, F):
            p = flt.predict_proba(patch.flatten())

            flt_img[r,c] = p[0,1]
            c = (c + 1) % neurons_num
            if c == 0:
                r = (r + 1) % neurons_num


        tmp = tmp.reshape((img_size,img_size))

        tmp *= 255
        flt_img *= 255

        flt_img = np.concatenate((flt_img, [[0] * (img_size - neurons_num)] * neurons_num), axis=1)
        flt_img = np.concatenate((flt_img, [[0] * img_size] * (img_size - neurons_num)), axis=0)


        if to_show == None:
            to_show = np.concatenate((tmp, flt_img), axis=1)
        else:
            to_show = np.concatenate(( to_show, np.concatenate((tmp, flt_img), axis=1) ), axis=0)
        
    to_show = to_show.astype(np.uint8)

    plot.clf()
    plot.imshow(to_show)
    plot.show()
Example no. 9
def test(ann, images, img_size, S, F, neurons_num):

    ww, bb = ann.get_weights()
    ss = ann.ss[:len(ann.ss) / 2 + 1]
    flt = ANN(ss, .0)
    ww_size = 0
    bb_size = 0
    for l in range(1, len(ss)):
        ww_size += ss[l - 1] * ss[l]
        bb_size += ss[l]
    flt.set_weights(ww[:ww_size], bb[:bb_size])

    N = images.shape[0]
    ii = range(N)
    np.random.shuffle(ii)
    ii = ii[:10]

    to_show = None
    for i in ii:
        tmp = images[i].copy()
        tmp = tmp.reshape((img_size, img_size))

        flt_img = np.zeros((neurons_num, neurons_num))

        r = 0
        c = 0
        for patch in patches(tmp, img_size, S, F):
            p = flt.predict_proba(patch.flatten())

            flt_img[r, c] = p[0, 1]
            c = (c + 1) % neurons_num
            if c == 0:
                r = (r + 1) % neurons_num

        tmp = tmp.reshape((img_size, img_size))

        tmp *= 255
        flt_img *= 255

        flt_img = np.concatenate(
            (flt_img, [[0] * (img_size - neurons_num)] * neurons_num), axis=1)
        flt_img = np.concatenate(
            (flt_img, [[0] * img_size] * (img_size - neurons_num)), axis=0)

        if to_show == None:
            to_show = np.concatenate((tmp, flt_img), axis=1)
        else:
            to_show = np.concatenate(
                (to_show, np.concatenate((tmp, flt_img), axis=1)), axis=0)

    to_show = to_show.astype(np.uint8)

    plot.clf()
    plot.imshow(to_show)
    plot.show()
Example no. 10
    def __init__(self, x, y, W, H, food):
        super(Animal, self).__init__(x, y, W, H)

        self.brain = ANN([1, 2])
        """ the orientation of the animal in the environment (range: 0 to 2pi) """
        self.orientation = 0
        """ initialise speed """
        self.speed = ANIMAL_MOVE_SPEED
        """ number of food eaten in this generation by this animal """
        self.num_food = 0

        self.food = food
Example no. 11
def online_eval():
    # evaluation code for online test
    handler = DataHandler()
    data = handler.generate_data(TRAIN_FILENAME)
    testing_data = handler.generate_data(TEST_FILENAME, "test")
    ann = ANN(9, 10, 1)
    for i in range(80):
        print(i + 1)
        ann.train(data, 5000)

    result = ann.test_without_true_label(testing_data, 0.23)
    handler.write_to_result(TEST_FILENAME, result)
Example no. 12
 def train(self, hidden_layers, epochs, learning_rate):
     labels = self.dp_train.labels()
     for label in labels:
         data = self.dp_train.binarizeU(label, upsampled=True)
         X = data[:, 0:-1]
         y_train_logistic = data[:, -1]
         logistic_classifier = ANN(hidden_layers,
                                   epochs,
                                   learning_rate,
                                   verbose=False)
         logistic_classifier.train(X, y_train_logistic)
         self.logistic_classifiers.append(logistic_classifier)
Example no. 13
	def test_mnist_28by28(self):
		import time
		import os
		import numpy as np
		import matplotlib.pyplot as plt
		from sklearn.cross_validation import train_test_split
		from sklearn.datasets import load_digits
		from sklearn.metrics import confusion_matrix, classification_report
		from sklearn.preprocessing import LabelBinarizer
		from ann import ANN

		# load lecun mnist dataset
		X = []
		y = []
		with open('data/mnist_test_data.txt', 'r') as fd, open('data/mnist_test_label.txt', 'r') as fl:
			for line in fd:
				img = line.split()
				pixels = [int(pixel) for pixel in img]
				X.append(pixels)
			for line in fl:
				pixel = int(line)
				y.append(pixel)
		X = np.array(X, np.float)
		y = np.array(y, np.float)

		# normalize input into [0, 1]
		X -= X.min()
		X /= X.max()

		# quick test
		#X = X[:1000]
		#y = y[:1000]

		# for my network
		X_test = X
		y_test = y #LabelBinarizer().fit_transform(y)

		nn = ANN([1,1])
		nn = nn.deserialize('28_200000.pickle') # '28_100000.pickle'

		predictions = []
		for i in range(X_test.shape[0]):
			o = nn.predict(X_test[i])
			predictions.append(np.argmax(o))

		# compute a confusion matrix
		print("confusion matrix")
		print(confusion_matrix(y_test, predictions))

		# show a classification report
		print("classification report")
		print(classification_report(y_test, predictions))
Example no. 14
    def getAnns():
        builderNoConv = BuilderCNN_MNIST(removeConvLayers=True)

        cnn1 = ANN(BuilderCNN_MNIST(18))
        dnn_huConv = ANN(builderNoConv, PreprocessorHuConv4Dnn(cnn1))
        dnn_conv = ANN(builderNoConv,
                       PreprocessorHuConv4Dnn(cnn1, removeHuPreprocess=True))

        cnn2 = ANN(BuilderCNN_MNIST(18 + 7))
        dnn_conv2 = ANN(builderNoConv,
                        PreprocessorHuConv4Dnn(cnn2, removeHuPreprocess=True))

        return [cnn1, dnn_huConv, dnn_conv, cnn2, dnn_conv2]
Example no. 15
 def train(self, hidden_layers, epochs, learning_rate):
     labels = self.dp_train.labels()
     self.labels_combination = list(itertools.combinations(labels, 2))
     for labels in self.labels_combination:
         data = self.dp_train.binarize(labels)
         X = data[:, 0:-1]
         y = data[:, -1]
         logistic_classifier = ANN(hidden_layers,
                                   epochs,
                                   learning_rate,
                                   verbose=False)
         logistic_classifier.train(X, y)
         self.logistic_classifiers.append(logistic_classifier)
Example no. 16
    def test_ann(self):

        ## The ANN annotation CSV file.
        ann = ANN("testdata/ANN/000000_00_00_00.csv")

        # The tests.

        # The headers.
        self.assertEqual(ann.get_number_of_headers(), 2)
        self.assertEqual(ann.get_header(0), "annotation_id")
        self.assertEqual(ann.get_header(1), "annotation")

        # The annotations.

        # Test the number of annotations found.
        self.assertEqual(ann.get_number_of_annotations(), 88)
Example no. 17
def main():
    if len(sys.argv) != 4:
        print(
            f"USAGE: {sys.argv[0]} <saved_model_path> <test_set_path> <predictions_output_path>"
        )
        return

    saved_model_path = sys.argv[1]
    test_set_path = sys.argv[2]
    predictions_output_path = sys.argv[3]

    ann = ANN.load(saved_model_path)
    test_data, _ = load_dataset(test_set_path)
    print(f"{test_data[0].shape}")

    predictions_vectors = [
        ann.predict(test_data[i]) for i in range(len(test_data))
    ]
    # We need to add one to the prediction, because the provided datasets' tags are 1-based
    predictions = [
        from_categorical(predictions_vectors[i]) + 1
        for i in range(len(predictions_vectors))
    ]
    print(predictions)

    with open(predictions_output_path, "w") as file_:
        file_.write("\n".join([str(x) for x in predictions]))
Example no. 18
	def __init__(self):
		#change 3rd param
		"""
		Usually you should start with a high learning rate and a low momentum. Then you decrease the learning rate over time
		and increase the momentum. The idea is to allow more exploration at the beginning of the learning and force convergence
		at the end of the learning. Usually you should look at the training error to set up your learning schedule: 
		if it got stuck, i.e. the error does not change, it is time to decrease your learning rate.
		"""
		self.neural = ANN(48*48,7,2,1200,10,0.5)
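As a rough illustration of the schedule advice in the docstring above, here is a minimal sketch; the training callback and all starting values are hypothetical assumptions and are not part of the ANN class used in this example:

# Hedged sketch of the schedule described in the docstring: start with a high
# learning rate and a low momentum, decay the rate and raise the momentum over
# time, and cut the rate further whenever the training error stops changing.
# `train_error` and every constant below are illustrative assumptions.
def schedule(epochs, train_error, lr=0.5, momentum=0.1):
    prev_error = float('inf')
    for epoch in range(epochs):
        error = train_error(lr, momentum)             # one training pass, returns the error
        if abs(prev_error - error) < 1e-6:            # stuck: error no longer changes
            lr *= 0.5                                 # decrease the learning rate
        momentum = min(0.9, momentum + 0.8 / epochs)  # ramp the momentum up
        lr *= 0.99                                    # gentle decay over time
        prev_error = error
    return lr, momentum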
Example no. 19
    def run(self, lambd=0, keep_prob=1):

        train_X, train_Y, test_X, test_Y = data_service.load_2D_dataset()

        ann = ANN()
        learning_rate = 0.3
        parameters, costs = ann.fit(train_X,
                                    train_Y,
                                    learning_rate=learning_rate,
                                    lambd=lambd,
                                    keep_prob=keep_prob)

        plotting_service.plot_loss_per_iteration_for_learning_rate(
            costs, learning_rate)

        plotting_service.plot_decision_boundary(
            lambda x: ann_service.predict_dec(parameters, x.T), train_X,
            train_Y)
Example no. 20
	def test_xor_training(self):
		print("test_xor_training...")
		nn = ANN([2, 2, 1])
		inputs = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
		targets = [[0.0], [1.0], [1.0], [0.0]]
		predicts = []

		# train
		nn.train(40000, inputs, targets)

		for i in range(len(targets)):
			predicts.append(nn.predict(inputs[i]))

		# the prediction for 0,0 and 1,1 should be less than prediction for 0,1 and 1,0
		self.assertTrue(predicts[0] < predicts[1], 'xor relation1 not learned')
		self.assertTrue(predicts[0] < predicts[2], 'xor relation2 not learned')
		self.assertTrue(predicts[3] < predicts[1], 'xor relation3 not learned')
		self.assertTrue(predicts[3] < predicts[2], 'xor relation4 not learned')
Example no. 21
def ann_boost(training_set, validation_set, num_hidden_units,
              weight_decay_coeff, num_ann_training_iters,
              num_boosting_training_iters):
    # Add a column to the front of the example matrix containing the initial weight for each example
    example_weights = np.full((training_set.shape[0], 1),
                              1.0 / len(training_set))
    anns = []
    alphas = []
    for i in xrange(0, num_boosting_training_iters):
        print('\nBoosting Iteration ' + str(i + 1))
        weighted_training_set = np.column_stack(
            (example_weights, training_set))
        ann = ANN(weighted_training_set,
                  validation_set,
                  num_hidden_units,
                  weight_decay_coeff,
                  weighted_examples=True)
        ann.train(num_ann_training_iters, convergence_err=0.5, min_iters=1)
        actual_labels = ann.training_labels
        assigned_labels = ann.output_labels
        error = weighted_training_error(example_weights, actual_labels,
                                        assigned_labels)
        alpha = classifier_weight(error)
        print('\n\talpha: ' + str(alpha))
        if alpha == float('inf'):
            alphas = [float('inf')]
            anns = [ann]
            break
        anns.append(ann)
        alphas.append(alpha)
        if alpha != 0.0:
            example_weights = update_example_weights(example_weights, alpha,
                                                     actual_labels,
                                                     assigned_labels)
        else:
            break
    alphas = np.array(alphas)
    vote_labels = weighted_vote_labels(anns, alphas)
    assert ann is not None
    actual_labels = ann.validation_labels
    label_pairs = zip(actual_labels, vote_labels)
    accuracy, precision, recall, fpr = evaluate_ann_performance(
        None, label_pairs)
    return accuracy, precision, recall, fpr
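The helpers `classifier_weight` and `update_example_weights` are not shown in this example; a minimal sketch of the textbook AdaBoost versions of those two steps could look like the following (assuming labels in {-1, +1}, which may differ from this project's conventions):

import numpy as np

# Hedged sketch of the two unshown boosting helpers, using the standard
# AdaBoost formulas; the real project may use a different label convention
# or a different normalisation.
def classifier_weight(weighted_error):
    # alpha = 0.5 * ln((1 - err) / err); infinite when the weighted error is zero
    if weighted_error == 0.0:
        return float('inf')
    return 0.5 * np.log((1.0 - weighted_error) / weighted_error)

def update_example_weights(weights, alpha, actual_labels, assigned_labels):
    # up-weight misclassified examples, down-weight correct ones, then renormalise
    agreement = np.asarray(actual_labels) * np.asarray(assigned_labels)
    new_weights = weights.flatten() * np.exp(-alpha * agreement)
    new_weights /= new_weights.sum()
    return new_weights.reshape(weights.shape)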
Example no. 22
def main(argv):
    argc = len(argv)
    if argc < 2:
        print(get_help())
        exit(0)

    if argv[1] == 't':
        net = ANN(Functions.SIGMOID, Functions.MSE)
        nb = NaiveBayes()
        bn = Bernoulli()
        selected_tweets = reader.read(argv[2])
        rejected_tweets = reader.read(argv[3])
        t1 = threading.Thread(target=train_net,\
                              args=(net, selected_tweets, rejected_tweets))
        t2 = threading.Thread(target=train_nb,\
                              args=(nb, selected_tweets, rejected_tweets))
        t3 = threading.Thread(target=train_bn,\
                              args=(bn, selected_tweets, rejected_tweets))
        t1.start()
        t2.start()
        t3.start()
        t1.join()
        t2.join()
        t3.join()

    elif argv[1] == 'c':
        f_net = open(argv[2], 'rb')
        net = pickle.load(f_net)
        f_net.close()

        f_nb = open(argv[3], 'rb')
        nb = pickle.load(f_nb)
        f_nb.close()

        f_bn = open(argv[4], 'rb')
        bn = pickle.load(f_bn)
        f_bn.close()

        tweets = reader.read(argv[5])

        t1 = threading.Thread(target=classify_using_net,\
                              args=(net, tweets))
        t2 = threading.Thread(target=classify_using_nb,\
                              args=(nb, tweets))
        t3 = threading.Thread(target=classify_using_bn,\
                              args=(bn, tweets))
        t1.start()
        t2.start()
        t3.start()
        t1.join()
        t2.join()
        t3.join()
    else:
        print(get_help())
        exit(0)
Example no. 23
    def __init__(self):
        # load the layers of sketch-a-net with pretrained weights
        self.layers = load_layers('./data/model_without_order_info_224.mat')

        # load the corpuses of mark matches at layers 2 and 4
        # layer 2 matches low level marks like lines and
        # layer 4 matches higher levels marks like closed forms.
        with open('./data/corpus' + file_extension + '4.txt', 'rb') as fp:
            imgs, acts = pickle.load(fp)
            self.imgs4 = imgs
        with open('./data/corpus' + file_extension + '2.txt', 'rb') as fp:
            imgs, acts = pickle.load(fp)
            self.imgs2 = imgs

        # init the approximate nearest neighbors class for mark matching
        # the results from this can be fetched from self.imgs*
        self.ANN = ANN()

        # the layers for this repeater from sketch-a-net
        self.layer_names = ['conv1', 'conv2', 'conv3', 'conv4']
Example no. 24
def main():
    if len(sys.argv) != 4:
        print("Invalid number of arguments.")
        print(
            f"Expected usage: python {sys.argv[0]} train_images train_labels validation_images"
        )
        return

    test_label_path = None
    history_images = None
    history_labels = None
    history_accuracy = None
    if devel:
        test_label_path = "validation-labels.txt"

    (train_images, train_labels, test_images,
     test_labels) = load_data(sys.argv[1], sys.argv[2], sys.argv[3],
                              test_label_path)
    train_labels_ohv = labels_to_1_hot(train_labels)

    if devel:
        test_labels_ohv = labels_to_1_hot(test_labels)
        history_images = test_images
        history_labels = test_labels_ohv
        history_accuracy = calculate_accuracy

    network = ANN(784, 'mean_square_error', regularization='L2', lambd=0.01)
    network.add_layer(10, 'leaky_relu')
    network.add_layer(10, 'leaky_relu')
    network.add_layer(10, 'tanh')

    (train_loss, test_loss, train_acc,
     test_acc) = network.train(train_images, train_labels_ohv, 40, 10, 0.1,
                               0.1 / 40, devel, history_images, history_labels,
                               history_accuracy)

    test_out = network.eval(test_images)

    if not devel:
        labels = test_out.argmax(1)
        for i in labels:
            print(f"{i}")
    else:
        xaxis = [x for x in range(train_loss.size)]
        plt.figure(1)
        plt.plot(xaxis, train_loss, xaxis, test_loss)
        plt.legend(("Training loss", "Test loss"))
        plt.figure(2)
        line = plt.plot(xaxis, train_acc, xaxis, test_acc)
        plt.legend(("Training accuracy", "Test accuracy"))
        plt.show()
Example no. 25
 def annFromFVs(self):
     # [for px in A, get featureVector]
     fvs = featureVector.getAllFeatureVectors(self.A,self.A1)
     # get dim
     print(len(fvs),len(fvs[0]))
     dim = len(fvs[0])
     self.ann = ANN(dim)
     # add these feature vectors to ann
     self.ann.addVectors(fvs)
     if self.debug:
         print("populated the ANN")
     self.ann.save()
Example no. 26
def main():
    if len(sys.argv) != 3:
        print(f"USAGE {sys.argv[0]} <model_path> <dataset_path>")
        return

    model_path = sys.argv[1]
    dataset_path = sys.argv[2]

    ann = ANN.load(model_path)
    data, tags = load_dataset(dataset_path)

    print(ann.evaluate(data, tags))
Example no. 27
def ann_bag(training_set, validation_set, num_hidden_units, weight_decay_coeff,
            num_ann_training_iters, num_bagging_training_iters):
    iter_labels = None
    example_weights = np.full((training_set.shape[0], 1),
                              1.0 / len(training_set))
    for i in xrange(0, num_bagging_training_iters):
        print('\nBagging Iteration ' + str(i + 1))
        replicate_set = bootstrap_replicate(training_set, seed_value=i)
        weighted_replicate_set = np.column_stack(
            (example_weights, replicate_set))
        ann = ANN(weighted_replicate_set,
                  validation_set,
                  num_hidden_units,
                  weight_decay_coeff,
                  weighted_examples=True)
        ann.train(num_ann_training_iters, convergence_err=0.5)
        if iter_labels is not None:
            iter_labels = np.column_stack((iter_labels, ann.evaluate()[1]))
        else:
            iter_labels = ann.evaluate()[1]
    voting_labels = np.apply_along_axis(most_common_label, 1, iter_labels)
    assert ann is not None
    actual_labels = ann.validation_labels
    label_pairs = zip(actual_labels, voting_labels)
    accuracy, precision, recall, fpr = evaluate_ann_performance(
        None, label_pairs)
    return accuracy, precision, recall, fpr
Example no. 28
    def __init__(self, input_size, hidden_layers, output_size):

        # network input
        self.X = tf.placeholder(tf.float32, [None, input_size], name='X')

        # encoders and decoder are just fully connected networks
        self.encoder = ANN(input_size, hidden_layers, output_size)
        M = output_size // 2
        self.decoder = ANN(M, hidden_layers[::-1], input_size)

        # Construct the sampling distribution from the output of the encoder
        self.encoder_out = self.encoder.forward(self.X)
        self.means = self.encoder_out[:, :M]
        self.stddev = tf.nn.softplus(self.encoder_out[:, M:]) + 1e-6

        with st.value_type(st.SampleValue()):
            self.Z = st.StochasticTensor(
                Normal(loc=self.means, scale=self.stddev))

        # network output
        self.logits = self.decoder.forward(self.Z)
        self.pX = Bernoulli(logits=self.logits)

        # Prior predictive sample
        standard_normal = Normal(loc=np.zeros(M, dtype=np.float32),
                                 scale=np.ones(M, dtype=np.float32))

        # initialize cost and training
        kl = tf.reduce_sum(
            tf.contrib.distributions.kl_divergence(self.Z.distribution,
                                                   standard_normal), 1)

        expected_log_likelihood = tf.reduce_sum(self.pX.log_prob(self.X), 1)

        self.elbo = tf.reduce_sum(expected_log_likelihood - kl)
        self.train_op = tf.train.RMSPropOptimizer(
            learning_rate=0.001).minimize(-self.elbo)

        self.X_hat = self.pX.sample()
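For reference, the cost wired up above is the negative of the per-batch evidence lower bound; the quantity being maximised for each data point is

\mathrm{ELBO}(x) = \mathbb{E}_{q(z \mid x)}\big[\log p(x \mid z)\big] - \mathrm{KL}\big(q(z \mid x) \,\|\, \mathcal{N}(0, I)\big)

which is exactly `expected_log_likelihood - kl`, summed over the batch and then negated for the RMSProp optimiser.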
Example no. 29
def analyzeSymbol(stockSymbol):
    startTime = time.time()

    trainingData = getTrainingData(stockSymbol)

    network = ANN(inNode=3, hiddenNode=3, outNode=1)

    network.training(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                           predictionData[2])

    # create return object, including the amount of time used to predict

    return predictedStockPrice
Example no. 30
def main():
  import input
  logging.basicConfig(level=logging.INFO, stream=sys.stdout)
  np.set_printoptions(precision=3, edgeitems=3, threshold=20)

  random.seed(5108) # used by the GA
  randSample = random.Random(input.SAMPLE_SEED) # used for data set sampling

  inp = input.Input("train3-std.tsv", randSample)
  print "Train set:",
  inp.trainSet.show()
  
  print "Test set:",
  inp.testSet.show()

  n = inp.trainSet.size * 20/100
  a = ANN()
  a.prepare(inp.trainSet, POPSIZE)
  
  tester = SampleTester()
  tester.prepare(inp.testSet, randSample)
  tester.showSampleSets()

  params = []
  generatePop(params)
  mutateValue = 6.0

  for genIndex in range(5000):
    print "Generation", genIndex, "starting."
    logFP("Population", params)
    outputValues = a.evaluate(params, returnOutputs=True)
    
    logFP("Outputs", outputValues)
    
    thresholds = a.nlargest(n)
    logFP("Thresholds", thresholds)

    lifts = a.lift(n)
    logFP("Lifts", lifts)

    taggedParams = sorted(zip(lifts, params, range(len(params))),
                          key=lambda (l, p, i): l,
                          reverse=True)
    sortedParams = [p for l, p, i in taggedParams]
    logFP("Sorted pop", sortedParams)

    testLift, _ = tester.test(sortedParams[0])

    genplot.addGeneration(lifts, testLift, genIndex)
    
    params = generateGeneration(sortedParams, mutateValue)
    if genIndex%500 == 499:
        mutateValue -= 0.5

  args = sys.argv[1:]
  if len(args) == 1:
    open(args[0], "w").write(repr(sortedParams[0]))

  genplot.plot()
Example no. 31
def compare_learning_rate(X, Y):
    """
    This function studies the convergence while varying the learning rate
    :param X: Training inputs
    :param Y: Training targets
    :return: plot of the evolution of the error along the iterations
    for different values of the learning rate

    For this function the update rule and the method (batch/sequential) have to be changed
    manually in params "learn_method" and in the training function, respectively.

    """

    fig, ax = plt.subplots()
    eta = np.linspace(0.0005, 0.0015, 5)
    for e in eta:
        params = {
            "learning_rate": e,
            "batch_size": N,
            "theta": 0,
            "epsilon": -0.1,  # slack for error during training
            "epochs": 10,
            "act_fun": 'step',
            "m_weights": 0.9,
            "sigma_weights": 0.9,
            "nodes": 1,
            "learn_method": 'delta_rule' #'delta_rule'
        }

        training_method = 'sequential'  # 'batch' , 'sequential'

        ann = ANN(X, Y, **params)

        ann.train(training_method, verbose=verbose)

        ax.plot(range(len(ann.error_history)), ann.error_history, label='$\eta = {}$'.format(e))
        #ax.set_xlim(0, 40)
    ax.legend()
    plt.show()
Example no. 32
    def __init__(self, population):
        '''
        Create game AI.

        :param population: Number of AI to evolve.
        '''
        print("Creating game AI.")
        self.genomes = list()
        self.anns = list()
        self.generation = 1
        self.last_avg_fit = 0.0

        index = 0
        while population > 0:
            print("AI's left: " + str(population))
            ann = ANN()
            self.anns.append(ann)
            # Names will get strange if lots of brains are created
            self.genomes.append(FloatGenome(ann.get_internal_data(), 0.0,
                                            chr(ord('a') + index)))
            population -= 1
            index += 1
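A quick illustration of why the comment above warns that names will get strange when many brains are created; `chr(ord('a') + index)` only yields letters for the first 26 indices:

# Illustrative check (not part of the original example): after index 25 the
# generated genome names leave the lowercase letters and hit punctuation.
print(chr(ord('a') + 0), chr(ord('a') + 25), chr(ord('a') + 26))  # -> a z {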
Example no. 33
class Soldier(object):
    """
    Class to hold the data for a soldier object.
    Each soldier has a "brain", which is a Sigmoid neural network with 120 inputs.

    The inputs represent squares in an 11x11 grid around the soldier's current position.
    Each square in the grid has an integer value representing the current status of that square.

    For example, if the value for "soldier" was 1, and there was another soldier at (4, 5) relative to this
    soldier, that input would have value 1. Other values may represent grass, water etc.
    """

    def __init__(self, pos):
        self.brain = ANN(constants.soldier_brain, Neuron.Sigmoid)

        self.pos = pos

    def think(self, squares):

        output = self.brain.run(squares)

        """ tolerance must be less than the minimum difference between defining values below """
        tolerance = 0.1

        """ values of the output defining behaviour """
        move_none   = 0
        move_left   = 0.25
        move_up     = 0.5
        move_right  = 0.75
        move_down   = 1

        """ decide where to move the soldier """
        if abs(output - move_none) < tolerance:
            pass
        elif abs(output - move_left) < tolerance:
            self.move(-1, 0)
        elif abs(output - move_up) < tolerance:
            self.move(0, -1)
        elif abs(output - move_right) < tolerance:
            self.move(1, 0)
        elif abs(output - move_down) < tolerance:
            self.move(0, 1)


    def move(self, dx, dy):
        """
        Move this solder on the map
        The actual updating of the map grid is done in gamemap.py
        """
        self.x += dx
        self.y += dy
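A minimal sketch of how the 11x11 neighbourhood described in the class docstring could be flattened into the 120-element input vector for `self.brain.run(...)`; the `grid_value` lookup and the choice to skip the soldier's own square (121 - 1 = 120) are assumptions made for illustration, not taken from this project:

# Hedged sketch: flatten the 11x11 grid around (x, y) into the 120 inputs the
# docstring describes. grid_value() is a hypothetical lookup returning the
# integer status of a square (e.g. 1 = soldier, other values for grass, water).
def encode_surroundings(grid_value, x, y):
    inputs = []
    for dy in range(-5, 6):
        for dx in range(-5, 6):
            if dx == 0 and dy == 0:
                continue  # skip the soldier's own square
            inputs.append(grid_value(x + dx, y + dy))
    return inputs  # length 120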
Example no. 34
    def anncv(Xtrain, Ytrain, Xtest, Ytest):
        inputsize = Xtrain.shape[1]
        hiddensize = 12
        outputsize = len(classes)
        ann = ANN(inputsize,hiddensize,outputsize)

        mtrain = len(Ytrain)
        # for this example, I'm only training with the first 12 examples
        for n in xrange(400):
            for i in xrange(mtrain):
                x,y = Xtrain[i],Ytrain[i]
                target = np.zeros(len(classes))
                target[classes.index(y)] = 1
                ann.train(x,target,alpha=0.1,momentum=0.2)
        
        mtest = len(Ytest)
        Ypredict = np.zeros(mtest)
        for i in xrange(mtest):
            x,y = Xtest[i],Ytest[i]
            results = ann.predict(x)
            prediction = results.argmax()
            Ypredict[i] = prediction
        return Ypredict
Example no. 35
    def anncv(Xtrain, Ytrain, Xtest, Ytest):
        inputsize = Xtrain.shape[1]
        hiddensize = 12
        outputsize = len(classes)
        ann = ANN(inputsize, hiddensize, outputsize)

        mtrain = len(Ytrain)
        # for this example, I'm only training with the first 12 examples
        for n in xrange(400):
            for i in xrange(mtrain):
                x, y = Xtrain[i], Ytrain[i]
                target = np.zeros(len(classes))
                target[classes.index(y)] = 1
                ann.train(x, target, alpha=0.1, momentum=0.2)

        mtest = len(Ytest)
        Ypredict = np.zeros(mtest)
        for i in xrange(mtest):
            x, y = Xtest[i], Ytest[i]
            results = ann.predict(x)
            prediction = results.argmax()
            Ypredict[i] = prediction
        return Ypredict
Example no. 36
    def __init__(self, population):
        '''
        Create game AI.

        :param population: Number of AI to evolve.
        '''
        print("Creating game AI.")
        self.genomes = list()
        self.anns = list()
        self.generation = 1
        self.last_avg_fit = 0.0

        index = 0
        while population > 0:
            print("AI's left: " + str(population))
            ann = ANN()
            self.anns.append(ann)
            # Names will get strange if lots of brains are created
            self.genomes.append(
                FloatGenome(ann.get_internal_data(), 0.0,
                            chr(ord('a') + index)))
            population -= 1
            index += 1
Example no. 37
def train(mode='new', train_times=100, lr=0.1, **kwargs):
    global data_len

    if mode in ['new', 'continue']:
        text = load_data()
        data = sample_data(text)
        print data[0].shape
        print data[1].shape
        l = int(data_size * 0.7)
        datasets = [(shared(data[0][:l]), shared(data[1][:l])),
                    (shared(data[0][l:]), shared(data[1][l:]))]

#ann
        theano.config.exception_verbosity='high'
        theano.config.on_unused_input='ignore'

    if mode=='new':
        cl = ANN(data_len * alphanum, alphanum, hiddens = [300, 300, 200], lmbd = 0)
        cl.fit(datasets, lr = theano.tensor.cast(lr, theano.config.floatX), n_epochs = train_times, batch_size = 200)

        dump(cl, open('save.dat', 'wb'))
    elif mode=='continue':
        try:
            os.rename('save.dat', 'origin.dat')
        except:
            pass
        cl = load(open('origin.dat','rb'))
        print cl
        cl.fit(datasets, lr = theano.tensor.cast(lr, theano.config.floatX), n_epochs = train_times, batch_size = 200)

        dump(cl, open('save.dat', 'wb'))

    elif mode=='create':
        cl = load(open('origin.dat','rb'))
        create(**kwargs)

    return cl
Example no. 38
    def __init__(self, input_size, convolution_layer_info,
                 fully_connected_layer_sizes, output_size):

        self.conpool_layers = []

        self.Xin = tf.placeholder(tf.float32, [None, *input_size], name="X")
        self.labels = tf.placeholder(tf.float32, [None, output_size],
                                     name="labels")

        C1 = input_size[-1]
        output_reduction_factor = np.array([0, 0])

        # Construct the conpool layers
        for i, layer_info in enumerate(convolution_layer_info):
            C2 = layer_info["window_depth"]
            self.conpool_layers.append(ConPoolLayer(C1, C2, layer_info))
            C1 = C2
            output_reduction_factor += layer_info["pooling_strides"][1:-2]

        # Calculate the input size of the fully connected layer so that it
        # couples to the convolutional layer
        #   - this will depend on stride of the pooling layer
        self.Cout = int(C2 *
                        np.prod(input_size[:-2] / output_reduction_factor))
        self.fully_connected_network = ANN(self.Cout,
                                           fully_connected_layer_sizes,
                                           output_size)

        self.logits = self.forward(self.Xin)

        self.cost = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                    labels=self.labels))

        self.train_op = tf.train.AdamOptimizer().minimize(self.cost)

        self.predictions = self.predict(self.Xin)
Example no. 39
def train_ANN_PSO(inputs,
                  res_ex,
                  max_iter,
                  n_particle,
                  n_neighbor,
                  nb_h_layers,
                  nb_neurons_layer,
                  min_bound,
                  max_bound,
                  cognitive_trust,
                  social_trust,
                  inertia_start,
                  inertia_end,
                  velocity_max,
                  activations,
                  draw_graph=False):
    nb_neurons = set_nb_neurons(len(inputs[0]), nb_neurons_layer, nb_h_layers)
    # print(nb_neurons, n_neighbor, activations)
    ann = ANN(nb_neurons=nb_neurons,
              nb_layers=len(nb_neurons),
              activations=activations)
    dim = sum(nb_neurons[i] * nb_neurons[i + 1]
              for i in range(len(nb_neurons) - 1)) + len(nb_neurons) - 1
    pso = PSO(dim,
              lambda params: fitness_for_ann(params, ann, inputs, res_ex),
              max_iter=max_iter,
              n_particle=n_particle,
              n_neighbor=n_neighbor,
              comparator=minimise,
              min_bound=min_bound,
              max_bound=max_bound,
              cognitive_trust=cognitive_trust,
              social_trust=social_trust,
              inertia_start=inertia_start,
              inertia_end=inertia_end,
              velocity_max=velocity_max,
              endl='',
              version=2007)
    if draw_graph:
        pso.set_graph_config(inputs=inputs, res_ex=res_ex)
    pso.run()
    return pso, ann
Example no. 40
def run_model(which='all'):
    if which in ['ann', 'all', 'main', 'standard']:
        model = ANN(emb_size, vocab_size, hid_dim, hid_num, class_num,
                    sent_len).cuda()
        ann_loss = train(model, x, target, ann=True)
        plt.plot(ann_loss, label='ann')
    if which in ['wann', 'all', 'standard']:
        model = WANN(emb_size, vocab_size, hid_dim, hid_num, class_num,
                     sent_len).cuda()
        wann_loss = train(model, x, target, ann=True)
        plt.plot(wann_loss, label='wann')
    if which in ['rnn', 'all', 'main']:
        model = RNN(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        rnn_loss = train(model, x, target)
        plt.plot(rnn_loss, label='rnn')
    if which in ['exrnn', 'all']:
        model = EXRNN(emb_size, vocab_size, hid_dim, hid_num, class_num, 2000,
                      2000).cuda()
        exrnn_loss = train(model, x, target)
        plt.plot(exrnn_loss, label='exrnn')
    if which in ['exmem', 'all']:
        model = EXRNN(emb_size,
                      vocab_size,
                      hid_dim,
                      hid_num,
                      class_num,
                      2000,
                      forget_dim=None).cuda()
        exmem_loss = train(model, x, target)
        plt.plot(exmem_loss, label='exmem')
    if which in ['lstm', 'all', 'main']:
        model = LSTM(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        lstm_loss = train(model, x, target)
        plt.plot(lstm_loss, label='lstm')
    if which in ['gru', 'all', 'main']:
        model = GRU(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        gru_loss = train(model, x, target)
        plt.plot(gru_loss, label='gru')
    # plt.ylim([0, 2])
    plt.legend()
    plt.grid(True)
    plt.show()
Example no. 41
def perform_ova_single_nn():
    ann = ANN((100, ), 5000, 0.01, verbose=False)

    parameter_space = {
        'hidden_layer_sizes': [(50, ), (100, ), (150, ), (200, ), (50, 50),
                               (50, 100), (100, 50), (200, 50)],
        'learning_rate_init': [0.005, 0.01, 0.015, 0.02, 0.025],
        'max_iter': [2000, 3000, 4000, 5000]
    }
    grid = GridSearchCV(ann.mlp, parameter_space, n_jobs=-1, cv=3)
    grid.fit(x_train, y_train)
    print('Best parameters found:\n', grid.best_params_)
    print('Results on the test set:\n',
          classification_report(y_test, grid.predict(x_test)))
    # print('Results on the test set:\n', confusion_matrix(y_test, grid.predict(x_test)))

    means = grid.cv_results_['mean_test_score']
    stds = grid.cv_results_['std_test_score']
    params = grid.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))
Example no. 42
def main():
    nn = ANN([5, 5, 1], Utils.linear, Utils.linear_derivative)
    u, t = Utils.readData()

    for i in range(100):
        for j in range(len(u)):
            nn.backPropag(nn.computeLoss(u[j], t[j]), 0.01)

    # compute the errors
    diff = []
    for i in range(len(u)):
        predicted = nn.feedForward(u[i])
        diff.append(abs(predicted[0] - t[i][0]))
        print("Actual: {}, Predicted:{}".format(t[i][0], predicted[0]))

    print("Mean of errors: {}".format(mean(diff)))
Example no. 43
def train(mode='new', train_times=100, lr=0.1, **kwargs):
    global data_len

    if mode in ['new', 'continue']:
        text = load_data()
        data = sample_data(text)
        print data[0].shape
        print data[1].shape
        l = int(data_size * 0.7)
        datasets = [(shared(data[0][:l]), shared(data[1][:l])),
                    (shared(data[0][l:]), shared(data[1][l:]))]

        #ann
        theano.config.exception_verbosity = 'high'
        theano.config.on_unused_input = 'ignore'

    if mode == 'new':
        cl = ANN(data_len * alphanum,
                 alphanum,
                 hiddens=[300, 300, 200],
                 lmbd=0)
        cl.fit(datasets,
               lr=theano.tensor.cast(lr, theano.config.floatX),
               n_epochs=train_times,
               batch_size=200)

        dump(cl, open('save.dat', 'wb'))
    elif mode == 'continue':
        try:
            os.rename('save.dat', 'origin.dat')
        except:
            pass
        cl = load(open('origin.dat', 'rb'))
        print cl
        cl.fit(datasets,
               lr=theano.tensor.cast(lr, theano.config.floatX),
               n_epochs=train_times,
               batch_size=200)

        dump(cl, open('save.dat', 'wb'))

    elif mode == 'create':
        cl = load(open('origin.dat', 'rb'))
        create(**kwargs)

    return cl
Example no. 44
        else:
            failed += 1
    accuracy = float(passed)/float(passed+failed)
    print 'Passed: %d' % passed
    print 'Failed: %d' % failed
    print 'Accuracy: %f' % accuracy
    

# Load the pickled dataset
f = gzip.open('mnist.pkl.gz', 'rb')
train_set, valid_set, test_set = cPickle.load(f)
f.close()

# Convert training and testing sets
train_set = build_set(train_set[0], train_set[1])
test_set = build_set(test_set[0], test_set[1])

digits_ann = ANN(784,30,10,alpha=0.1)

print '+---------------+'
print '|Before Training|'
print '+---------------+'
test_network(digits_ann, test_set)

digits_ann.train_set(train_set)

print '+--------------+'
print '|After Training|'
print '+--------------+'
test_network(digits_ann, test_set)
Example no. 45
File: draw.py Project: vcte/kana
    (x, y) = (event.x, event.y)
    if (x >= 0 and x < width and y >= 0 and y < height):
        draw.ellipse((x - 10, y - 10, x + 10, y + 10), fill = (0xFF, 0xFF, 0xFF))
        refresh_draw()

def classify_draw(event):
    print("classifying drawing")
    ann.classify(ann.parse(normalize(image)))

def clear_draw(event):
    global draw
    draw.ellipse((-width, -height, width * 2, height * 2), fill = (0xFF, 0xFF, 0xFF))
    refresh_draw()

def outweights(l, j):
	return [ann.weights[l][i][j] for i in range(len(ann.weights[l]))]

def reconstruct(data):
	img = Image.new("LA", (16, 16))
	pxs = img.load()
	for x in range(16):
		for y in range(16):
			pxs[x, y] = (int(data[y * 16 + x] * 128 + 128), 255)
	return img

if __name__ == "__main__":
    ann = ANN()
    ann.load("256_iteration_30")
    
    gui()
Example no. 46
 def setup_network(self):
     self.move_classifier = ANN(ann_type="rlu2")
     self.errors = []
Example no. 47
# convert from numpy to normal python list for our simple implementation
X_train_l = X_train.tolist()
labels_train_l = labels_train.tolist()

# free memory
X = None
y = None


def step_cb(nn, step):
	print("ping")
	nn.serialize(nn, str(step) + ".pickle")

# load or create an ANN
nn = ANN([1,1])
serialized_name = '28_1000000.pickle'

if os.path.exists(serialized_name):
	# load a saved ANN
	nn = nn.deserialize(serialized_name)
else:
	# create the ANN with:
	# 1 input layer of size 784 (the images are 28x28 gray pixels)
	# 1 hidden layer of size 300
	# 1 output layer of size 10 (the labels of digits are 0 to 9)
	nn = ANN([784, 300, 10])

	# see how long training takes
	startTime = time.time()
Example no. 48
class Gui(tk.Tk):
    def __init__(self, delay, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        self.title("2048-solver")
        self.cell_width = self.cell_height = 50
        self.dim = (4, 4)
        self.delay=delay
        screen_width = self.dim[0]*self.cell_width+1
        screen_height = self.dim[1]*self.cell_height+1
        self.canvas = tk.Canvas(self, width=screen_width, height=screen_height, borderwidth=0, highlightthickness=0)
        self.canvas.pack(side="top", fill="both", expand="true")
        self.color_dict = self.fill_color_dict()

        self.results_from_nn_playing = []
        self.results_from_random_playing = []
        self.results = []
        self.start_time = time()

        self.setup_network()
        self.user_control()
        self.start_game()

    def start_game(self):
        if len(self.results) < self.results_length:
            print("run nr", len(self.results))
            self.game_board = Game2048(board=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]])
            self.board = self.game_board.board
            self.game_board.generate_new_node()
            self.depth = 3
            self.move_count = 0
            self.draw_board()
            self.time = time()
            self.run_algorithm()
        else:
            if self.action[0] == "p":
                self.results_from_nn_playing = copy.copy(self.results)
                print("p, largest tile", max(self.results_from_nn_playing))
                print("p, average tile", sum(self.results_from_nn_playing)/float(len(self.results_from_nn_playing)))
            elif self.action[0] == "r":
                self.results_from_random_playing = copy.copy(self.results)
                print("r, largest tile", max(self.results_from_random_playing))
                print("r, average tile", sum(self.results_from_random_playing)/float(len(self.results_from_random_playing)))
            elif self.action[0] == "c":
                self.results_from_nn_playing = copy.copy(self.results)
                self.results_from_random_playing = [112]*50
                self.print_comparison()
            self.results = []
            self.user_control()
            self.start_game()


    def setup_network(self):
        self.move_classifier = ANN(ann_type="rlu2")
        self.errors = []


    def user_control(self):

        while True:
            self.action = input("Press r to play random, t to train, p to play with nn, c to compare results: ")
            if self.action[0] == "t":
                if len(self.action) == 1:
                    output_activations = self.move_classifier.do_training()
                elif self.action[1] == "l":
                    output_activations = self.move_classifier.do_testing()
                elif self.action[1] == "a":
                    points = ai2048demo.welch(self.results_from_random_playing, self.results_from_nn_playing)
                    print("points", points)
            elif self.action[0] == "p" or self.action[0] == "r":
                self.results_length = 50
                return
            elif self.action[0] == "c":
                if len(self.results_from_nn_playing)+len(self.results_from_random_playing) < 100:
                    self.results_length = 50
                    return
                else:
                    self.print_comparison()
            else:
                self.errors = self.move_classifier.do_training(epochs=int(self.action), errors=self.errors)
                output_activations = self.move_classifier.do_testing(boards=self.move_classifier.test_boards)
                print("Statistics (test set):\t\t ", self.move_classifier.check_result(output_activations, labels=self.move_classifier.test_labels), "%")
                output_activations = self.move_classifier.do_testing(boards=self.move_classifier.boards)
                print("Statistics (training set):\t ", self.move_classifier.check_result(output_activations, labels=self.move_classifier.labels), "%")

            print("Total time elapsed: " + str(round((time() - self.start_time)/60, 1)) + " min")

    def run_algorithm(self):
        continuing = True
        if self.game_board.is_game_over():
            largest_tile = self.game_board.get_largest_tile()
            print("largest tile", largest_tile)
            self.results.append(largest_tile)
            print("average tile", sum(self.results)/float(len(self.results)))
            continuing = False
            return self.start_game()
        
        current_node = State(self.game_board, self.depth)
        self.move_count += 1
        flat_board = current_node.board.board[3] + current_node.board.board[2] + current_node.board.board[1] + current_node.board.board[0]
        if self.action[0] == "r":
            chosen_move = self.choose_legal_random_move()
        else:
            case = self.move_classifier.preprosessing(flat_board)
            result = self.move_classifier.predictor(case)
            chosen_move = self.choose_legal_move_from_nn(result)
        if chosen_move == 0:
            self.game_board.move_left()
        elif chosen_move == 1:
            self.game_board.move_right()
        elif chosen_move == 2:
            self.game_board.move_up()
        elif chosen_move == 3:
            self.game_board.move_down()
        else:
            print("Illegal move")
        self.game_board.generate_new_node()
        self.draw_board()
        if continuing:
            self.after(self.delay, lambda: self.run_algorithm())

    def choose_legal_random_move(self):
        while True:
            r = random.randint(0,3)
            if self.game_board.is_move_legal(r):
                return r

    def choose_legal_move_from_nn(self, result):
        chosen_move = None
        while chosen_move == None or not self.game_board.is_move_legal(chosen_move):
            if chosen_move != None:
                result[0][chosen_move] = -1
            chosen_move = np.argmax(result[0])
        return chosen_move

    def print_comparison(self):
        print("NN results:\t", self.results_from_nn_playing)
        print("Random results:\t", self.results_from_random_playing)
        print("largest tiles", max(self.results_from_nn_playing),  max(self.results_from_random_playing))
        print("average tiles", sum(self.results_from_nn_playing)/float(len(self.results_from_nn_playing)), sum(self.results_from_random_playing)/float(len(self.results_from_random_playing)))
        points = ai2048demo.welch(self.results_from_random_playing, self.results_from_nn_playing)
        print("points", points)

    def bind_keys(self):
        self.bind('<Up>', lambda event: self.move(self, self.game_board.move_up()))
        self.bind('<Down>', lambda event: self.move(self, self.game_board.move_down()))
        self.bind('<Right>', lambda event: self.move(self, self.game_board.move_right()))
        self.bind('<Left>', lambda event: self.move(self, self.game_board.move_left()))

    def move(self, event, is_moved):
        if is_moved:
            self.game_board.generate_new_node()
            self.draw_board()

    def draw_board(self):
        self.canvas.delete("all")
        for y in range(self.dim[1]):
                for x in range(self.dim[0]):
                    x1 = x * self.cell_width
                    y1 = self.dim[1]*self.cell_height - y * self.cell_height
                    x2 = x1 + self.cell_width
                    y2 = y1 - self.cell_height
                    cell_type = self.board[y][x]
                    text = str(self.board[y][x])
                    color = self.color_dict[str(self.board[y][x])]
                    self.canvas.create_rectangle(x1, y1, x2, y2, fill=color, tags="rect")
                    if cell_type != 0:
                        self.canvas.create_text(x1+self.cell_width/2, y1-self.cell_height/2, text=text)

    def fill_color_dict(self):
        color_dict = {
            '0': "white",
            '2': "PaleVioletRed1",
            '4': "PaleVioletRed2",
            '8': "hot pink",
            '16': "maroon1",
            '32': "maroon2",
            '64': "DeepPink2",
            '128': "DeepPink3",
            '256': "medium violet red",
            '512': "purple",
            '1024': "dark violet",
            '2048': "dark violet",
            '4096': "purple3",
            '8192': "purple3",
        }
        return color_dict
Example no. 49

train_ii = range(N)
np.random.shuffle(train_ii)

N_train = int(N * .8)
N_test  = N - N_train
test_ii = train_ii[N_train:]
train_ii = train_ii[:N_train]





M = 128
ann = ANN([S, M, S], .0)


total_cnt = 0
total_avr_cost = 0.


avr_cost = 0.
cnt = 0.
EPOCHES = 1000
for e in range(EPOCHES):
    np.random.shuffle(train_ii)

    for i in train_ii:
        tmp = images[i]
Example no. 50
ylb.fit(y)


# split the dataset
X_train, X_test, y_train, y_test = train_test_split(xsc.transform(X), y, random_state=0)


# load nn if exists else train
nn_fname = 'models/nn_iris_3000epochs.pickle'
if os.path.exists(nn_fname):
    # load
    print('loading the nn')
    nn = deserialize(nn_fname)
else:
    # train
    print('training the nn')
    nn = ANN([4, 10, 3])

    nn.train(X_train, ylb.transform(y_train), 3000)

    serialize(nn, nn_fname)

# predict
preds = np.array([nn.predict(example) for example in X_test])
y_pred = ylb.inverse_transform(preds)

# evaluate
print(confusion_matrix(y_test, y_pred))

print(classification_report(y_test, y_pred))
Example no. 51
class FER():
	def __init__(self):
		#change 3rd param
		"""
		Usually you should start with a high learning rate and a low momentum. Then you decrease the learning rate over time
		and increase the momentum. The idea is to allow more exploration at the beginning of the learning and force convergence
		at the end of the learning. Usually you should look at the training error to set up your learning schedule: 
		if it got stuck, i.e. the error does not change, it is time to decrease your learning rate.
		"""
		self.neural = ANN(48*48,7,2,1200,10,0.5)

	def train(self,dataset):
		print "FER Train"
		start = time.clock()
		weights = self.neural.train(dataset,epoch=3)
		result = True
		timex = 0
		
		weight_f = open("weights.txt","w")
		#print "len(weights)",len(weights)
		#print "len(weights[0])",len(weights[0])
		#print "weights[0][1]",weights[0][1]
		
		for layer in range(len(weights)):
			weight_f.write("Layer "+str(layer)+"\n")
			for e in range(len(weights[layer])):
				weight_f.write("Node "+str(e)+"\n")
				for f in range(len(weights[layer][e])):
					weight_f.write(str(weights[layer][e][f]) + " ")
				weight_f.write("\n")

		weight_f.close()

		end = time.clock()
		timex = end - start
		# train; writes the weights to the file; returns True if successful, False otherwise; returns time consumed
		return result, timex

	def test(self,dataset,weights):
		start = time.clock()
		correct = 0
		accuracy = 0.0
		result = True
		timex = 0
		count = 1
		for row in dataset:  # temporarily set to 10
			start = time.clock()
			print 'test_data', count
			prediction = self.predict(row[1], weights, flag=count)
			if prediction == row[0]:
				correct += 1

			count += 1
			end = time.clock()
			timex = end - start
			print 'test time', timex

		accuracy = float(correct) / len(dataset)

		end = time.clock()
		timex = end - start
		# returns the accuracy of the algorithm as a float; returns True if successful, False otherwise; returns time consumed
		return accuracy, result, timex

	def predict(self,image,weights,flag=-1):
		#classify a single image, returns the index of result (see main for the legend index:[0,7])
		if flag == 1:
			self.neural.set_weight(weights)
		elif flag == -1:
			self.neural.set_weight(weights)
		output = list(self.neural.classify(image))
		predicted_output = output.index(max(output))
		print "output:",output
		return predicted_output
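
# A minimal sketch of the schedule described in the docstring of FER.__init__ above:
# start with a high learning rate and a low momentum, anneal the rate and ramp the
# momentum over the epochs, and cut the rate when the training error plateaus.
# The function names and default values below are illustrative assumptions, not part
# of the ANN class used in this example.
def lr_momentum_schedule(epoch, n_epochs, lr0=0.5, lr_min=0.01, m0=0.1, m_max=0.9):
	# linear interpolation between the initial and final values
	frac = epoch / float(n_epochs)
	lr = lr0 + (lr_min - lr0) * frac
	momentum = m0 + (m_max - m0) * frac
	return lr, momentum

def stuck(errors, window=5, tol=1e-4):
	# the error "does not change" if it moved less than tol over the last `window` epochs;
	# the caller would then decrease the learning rate further (e.g. lr *= 0.5)
	if len(errors) < window:
		return False
	return max(errors[-window:]) - min(errors[-window:]) < tol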
Exemplo n.º 52
0
class ANN_Trainer:
    def __init__(self, userid):
        self.userid = userid

        self.inputSamples = []
        self.desiredOutputs = []

        for s in models.Ann_samples.objects.filter(userid=userid):
            self.inputSamples.append(eval(s.input))
            self.desiredOutputs.append(eval(s.output))

        self.learningRate = 0.1

        self.init()

    def __del__(self):
        del self.inputSamples
        del self.desiredOutputs

    def init(self):
        self.ann = ANN(self.userid)
        if self.ann.loadData():
            return

        self.ann.init([25, 30, 20, 26])

    def addData(self, inputSamples, desiredOutputs):
        self.sum_errors_prev = [1000, 1000]
        self.sum_errors_last = [1000, 1000]

        self.inputSamples += inputSamples
        self.desiredOutputs += desiredOutputs

    def updateLearningRate(self, sum_errors, sum_errors_prev):
        err = sum_errors[0]
        err_prev = sum_errors_prev[0]

        if err_prev == 0.0:
            return

        if err / err_prev > 1.04:
            self.learningRate *= 0.7
        if err < err_prev:
            self.learningRate *= 1.05

        if self.learningRate > 3.0:
            self.learningRate = 0.1

    def saveData(self, se, episode):
        data = [se, self.sum_errors_last, self.learningRate, episode]

        if not models.Ann_trainer_data.objects.filter(userid=self.userid):
            models.Ann_trainer_data.objects.create(userid=self.userid, data=str(data))
        else:
            models.Ann_trainer_data.objects.filter(userid=self.userid).update(data=str(data))

        del data

    def loadData(self):
        if not models.Ann_trainer_data.objects.filter(userid=self.userid):
            return 0

        data = eval(models.Ann_trainer_data.objects.get(userid=self.userid).data)

        # del self.sum_errors_prev
        # del self.sum_errors_last
        # self.sum_errors_prev = data[0]
        # self.sum_errors_last = data[1]
        self.learningRate = data[2]
        res = data[3]

        del data
        return res

    def train(self, max_error):
        self.sum_errors_prev = [1000, 1000]
        self.sum_errors_last = [1000, 1000]

        episode = self.loadData()

        while True:
            sum_errors = [0, 0]

            for i in range(len(self.inputSamples)):
                self.ann.activate(self.inputSamples[i])

                err = self.ann.updateErrorGradients(self.desiredOutputs[i])
                sum_errors[0] += err[0]
                sum_errors[1] += err[1]
                del err

                if i < len(self.inputSamples) - 1:
                    self.ann.updateWeights(self.learningRate)

            self.updateLearningRate(sum_errors, self.sum_errors_prev)

            if not episode % 10:
                if sum_errors[0] < self.sum_errors_last[0]:
                    self.ann.saveData()
                    # print "net data saved to file ..."
                    self.saveData(sum_errors, episode)
                    # print "trainer data saved to file ..."

                    self.sum_errors_last[0] = sum_errors[0]
                    self.sum_errors_last[1] = sum_errors[1]

                    # print sum_errors, self.learningRate
                    # print "episode =", episode
                    # print "---------------------------"

                    # if sum_errors[0] < 1.5:
                if sum_errors[0] < max_error:
                    break

            self.ann.updateWeights(self.learningRate)

            del self.sum_errors_prev
            self.sum_errors_prev = sum_errors
            episode += 1
Exemplo n.º 53
0
	def test_mnist_8by8_training(self):
		print("test_mnist_8by8_training")
		import time
		import numpy as np
		import matplotlib.pyplot as plt
		from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in newer scikit-learn
		from sklearn.datasets import load_digits
		from sklearn.metrics import confusion_matrix, classification_report
		from sklearn.preprocessing import LabelBinarizer
		from sklearn.metrics import precision_score, recall_score

		# import the simplified mnist dataset from scikit learn
		digits = load_digits()

		# get the input vectors (X is a vector of vectors of type int)
		X = digits.data

		# get the output vector ( y is a vector of type int)
		y = digits.target

		# normalize input into [0, 1]
		X -= X.min()
		X /= X.max()

		# split data into training and testing: 75% of examples are used for training and 25% for testing
		X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=123)

		# binarize the labels from a number into a vector with a 1 at that index
		labels_train = LabelBinarizer().fit_transform(y_train)
		labels_test = LabelBinarizer().fit_transform(y_test)

		# convert from numpy to normal python list for our simple implementation
		X_train_l = X_train.tolist()
		labels_train_l = labels_train.tolist()

		# create the artificial neuron network with:
		# 1 input layer of size 64 (the images are 8x8 gray pixels)
		# 1 hidden layer of size 100
		# 1 output layer of size 10 (the labels of digits are 0 to 9)
		nn = ANN([64, 100, 10])

		# see how long training takes
		startTime = time.time()

		# train it
		nn.train(10, X_train_l, labels_train_l)

		elapsedTime = time.time() - startTime
		print("time took " + str(elapsedTime))
		self.assertTrue(elapsedTime < 300, 'Training took more than 300 seconds')

		# compute the predictions
		predictions = []
		for i in range(X_test.shape[0]):
			o = nn.predict(X_test[i])
			predictions.append(np.argmax(o))

		# compute a confusion matrix
		# print(confusion_matrix(y_test, predictions))
		# print(classification_report(y_test, predictions))

		precision = precision_score(y_test, predictions, average='macro')
		print("precision", precision)
		recall = recall_score(y_test, predictions, average='macro')
		print("recall", recall)

		self.assertTrue(precision > 0.93, 'Precision must be bigger than 93%')
		self.assertTrue(recall > 0.93, 'Recall must be bigger than 93%')
Exemplo n.º 54
0
    training_set = []
    filepaths = glob.glob(directory + '/*.bmp')
    for fp in filepaths:
        # Open image, convert to grayscale, get data
        pixels = Image.open(fp).convert('L').getdata()
        values = normalize_pixel(np.array(pixels))
        training_set.append((values, output)) 
    return training_set
        
# Creating a master training set which cycles through images of each class
def build_training_sets():
    # Build training sets for each class
    violin_set = load_training_set('image_search/images/resized/violin', [1,0,0,0])
    trumpet_set = load_training_set('image_search/images/resized/trumpet', [0,1,0,0])
    tuba_set = load_training_set('image_search/images/resized/tuba', [0,0,1,0])
    clarinet_set = load_training_set('image_search/images/resized/clarinet', [0,0,0,1])

    # Create a list of tuples containing one of each type of instrument
    zipset = zip(violin_set, trumpet_set, tuba_set, clarinet_set)
    return [inp for subset in zipset for inp in subset] # flatten zipset
    

# ann = ANN(40*40, 300, 4)
ann = ANN.from_file('instrument_ann.npz')
ann.alpha=0.1
instrument_training_set = build_training_sets()
for _ in xrange(5):
    print "Error: %s" % np.mean(ann.train_set(instrument_training_set))
    ann.write_to_file('instrument_ann.npz')
print "Total iterations: %d" % ann.total_iterations
Exemplo n.º 55
0
		[
			[0.25, 0.25], 
			[0.25, 0.25],
			[0.25, 0.25]
		],
		[
			[0.25],
			[0.25],
			[0.25]
		]
]

print datetime.datetime.now()
tprime = lambda x: 1 - x**2
faux_data = [((1,1),1)]
test_ann = ANN(math.tanh, tprime, t_weights, False)
test_ann.populate(faux_data)
print "Numerical Gradient, tanh", test_ann.num_grad()
print "Actual Gradient, tanh", test_ann.calc_err_grad()[0]

test_ann.set_ident(True)
print "Numerical Gradient, ident", test_ann.num_grad()
print "Actual Gradient, ident", test_ann.calc_err_grad()[0]
#Problem 11.2 NN
print datetime.datetime.now()
weights = [np.random.rand(3,10) / 100, np.random.rand(11,1) / 100]
#A test:
ann = ANN(math.tanh, tprime, weights, True)
ann.populate(sampled_data)
#Part a
max_itr = 1000
Exemplo n.º 56
0
#! /usr/bin/python

from ann import ANN

ann = ANN()
ann.loadData()

inputSample = [0.0, 0.0, 0.2407407407407407, 0.79894179894179895, 0.0 , 0.0, 0.10317460317460314, 0.88095238095238093, 0.95767195767195767, 0.11528822055137844 , 0.010582010582010581, 0.74338624338624337, 0.64550264550264558, 0.8306878306878307, 0.35839598997493738 , 0.49470899470899465, 0.8306878306878307, 0.53968253968253976, 0.67460317460317465, 0.62155388471177941 , 0.75396825396825395, 0.19047619047619047, 0.0, 0.082010582010581978, 0.77694235588972438]

ann.activate(inputSample)
print ann.outputs
Exemplo n.º 57
0
class ANN_Trainer:
	def __init__(self, userid):
		self.userid = userid

		self.inputSamples = []
		self.desiredOutputs = []

		for s in models.Ann_samples.objects.filter(userid=userid):
			self.inputSamples.append(eval(s.input))
			self.desiredOutputs.append(eval(s.output))

		self.init()

	def __del__(self):
		del self.inputSamples
		del self.desiredOutputs

	def init(self):
		self.ann = ANN(self.userid)
		if self.ann.loadData():
			return

		self.ann.init([25, 30, 20, 26])


	def addData(self, inputSamples, desiredOutputs):
		self.sum_errors_last = [1000, 1000]

		self.inputSamples   += inputSamples
		self.desiredOutputs += desiredOutputs

	def saveData(self, se, episode):
		data = [se, episode]

		if not models.Ann_trainer_rp_data.objects.filter(userid=self.userid):
			models.Ann_trainer_rp_data.objects.create(userid=self.userid, data=str(data))
		else:
			models.Ann_trainer_rp_data.objects.filter(userid=self.userid).update(data=str(data))

		del data

	def loadData(self):
		return 0  # loading is currently disabled; the code below never runs

		if not models.Ann_trainer_rp_data.objects.filter(userid=self.userid):
			return 0

		data = eval(models.Ann_trainer_rp_data.objects.get(userid=self.userid).data)

		#del self.sum_errors_last
		#self.sum_errors_last = data[0]
		res = data[1]

		del data
		return res

	def train(self, max_error):
		self.sum_errors_last = [1000, 1000]

		episode = self.loadData()

		while True:
			sum_errors = [0, 0]

			for i in range(len(self.inputSamples)):
				self.ann.activate(self.inputSamples[i])

				err = self.ann.updateErrorGradients(self.desiredOutputs[i])
				sum_errors[0] += err[0]
				sum_errors[1] += err[1]
				del err

				self.ann.updateWeights_slope()

			#print sum_errors[0]
			if not episode % 10:
				if sum_errors[0] < self.sum_errors_last[0]:
					self.ann.saveData()
					#print "net data saved to file ..."
					self.saveData(sum_errors, episode)
					#print "trainer data saved to file ..."

					self.sum_errors_last[0] = sum_errors[0]
					self.sum_errors_last[1] = sum_errors[1]

				#print sum_errors
				#print "episode =", episode
				#print "---------------------------"

				if sum_errors[0] < max_error:
					break

			self.ann.updateWeights()

			del sum_errors
			episode += 1
Exemplo n.º 58
0
from beer.BeerAgent import BeerAgent

def test_ann(shadows, step):
    global ann
    for i in xrange(5):
        n = ann.neurons["s" + str(i)]
        n.output = shadows[i]
        n.step_counter = step

    right = ann.neurons["o0"].update(step)
    left = ann.neurons["o1"].update(step)

    return left, right
test = {'s0': {'tau': 1.592686147215389, 'bias': -2.615998446020173, 'g': 4.064259823298805}, 's3': {'tau': 2.0, 'bias': -9.596911713770295, 'g': 2.699219833009929}, 's2': {'tau': 2.0, 'bias': -7.9381838205981925, 'g': 1.196180415897641}, 's1': {'tau': 1.7131650362733641, 'bias': -9.800554266312117, 'g': 5}, 'h1': {'tau': 1.4759850492782784, 'g': 4.169964788304448, 's3': 10, 's2': -7.597575378784047, 's1': -7.30851842034613, 's0': -5.515947610513402, 's4': 10, 'bias': 0, 'h0': -9.161215598682354}, 's4': {'tau': 1.719469269868766, 'bias': -10, 'g': 1.5148042062097178}, 'h0': {'tau': 1.9805457921140477, 's0': 3.8498836747822374, 'g': 1.1918849794755142, 's3': 0.19606976014775368, 's2': 1.2234104157931345, 's1': -3.9816290689941063, 'h1': -1.752762947517268, 's4': 7.843733074806298, 'bias': -6.082985820301836}, 'o1': {'tau': 1.1177444179110503, 'g': 4.156539205282791, 'h0': 0.5183605142731278, 'h1': 9.884145727513904, 'bias': -3.094239221093069, 'o0': -10}, 'o0': {'tau': 1.5704922925516338, 'g': 4.8005459803723385, 'h0': 10.0, 'h1': -5.936363488987634, 'bias': -2.338789992756717, 'o1': -5.317834057912171}}

ann = ANN(BeerAgent.source)
for neuron in BeerAgent.source_appends:
    for key, weight in neuron["weights"].items():
        ann.add_input(neuron["name"], key, weight, True)


for key, node in test.items():
    for t,n in node.items():
        neu = ann.neurons[key]
        if t == 'tau':
            neu.tau = n
        elif t == 'bias':
            neu.bias = n
        elif t == 'g':
            neu.g = n
        else:
Exemplo n.º 59
0
import sys
sys.path.append('../')
import matplotlib.pyplot as plt
from ann import ANN

def plot_error(data):
    plt.plot(data)
    plt.show()

# Training set for the logical operator XOR
training_set = [ ([0,0],[0]),
                 ([1,0],[1]),
                 ([0,1],[1]),
                 ([1,1],[0]) ]

# Construct a neural net with 2 input nodes, 3 hidden nodes, and 1 output node
ann = ANN(2,3,1,alpha=1)

# Run the network through 2000 iterations of the training set
error_data = [ann.train_set(training_set) for _ in range(2000)]

# Plot the error for each XOR input pattern across the iterations
none, left, right, both = zip(*error_data)

none_line, = plt.plot(none)
left_line, = plt.plot(left)
right_line, = plt.plot(right)
both_line, = plt.plot(both)

plt.legend([none_line, left_line, right_line, both_line], ['[0,0]', '[1,0]', '[0,1]', '[1,1]'])

plt.xlabel('Iterations')
Exemplo n.º 60
0
    def init(self):
        self.ann = ANN(self.userid)
        if self.ann.loadData():
            return

        self.ann.init([25, 30, 20, 26])