def __init__(self,
                 D_hidden_dim,
                 G_hidden_dim,
                 z_dim,
                 hyperparams={},
                 dataset='mnist',
                 image_dim=None):

        self.dataset = dataset
        if dataset is None:
            if image_dim is None:
                raise RuntimeError("You must either define a recognised dataset "
                                   "or define an input image dimension")
        elif dataset.lower() == 'mnist':
            image_dim = 28 * 28
            self.digit = hyperparams.get("digit", 2)
        elif dataset.lower() == 'celeba_bw':
            print("This basic GAN version probably will not converge. "
                  "See TTitcombe/GANmodels for more powerful versions "
                  "(in development)")
            image_dim = 178 * 218
        else:
            raise NotImplementedError("The dataset you have selected "
                                      "is not recognised")

        self.epochs = hyperparams.get("epochs", 100)
        self.batchSize = hyperparams.get("batchSize", 64)
        self.lr = hyperparams.get("lr", 0.001)
        self.decay = hyperparams.get("decay", 1.)
        self.epsilon = hyperparams.get("epsilon", 1e-7)  #avoid overflow

        # Discriminator: maps an image to a single real/fake score
        self.D = ANN(image_dim, D_hidden_dim, 1, self.lr, False)
        # Generator: maps a latent vector z to an image
        self.G = ANN(z_dim, G_hidden_dim, image_dim, self.lr, True)

Example 2

def exp5(data_class, N=6, K=7, max_iter=50):
    """Apply the clustering algorithms to the same dataset to which you just
    applied the dimensionality reduction algorithms, treating the clusters as
    if they were new (additional) features. Rerun your neural network learner
    on the newly projected data."""

    # Load dataset
    data = data_class()

    # Set up dimensionality reduction and clustering pipelines
    dim_red = dim_red_pipelines(N)
    cluster = cluster_pipelines(K)

    # Build the neural network without dimensionality reduction
    nn = ANN()
    nn.train = nn.load_data(data.train.X, data.train.Y, data.n_class)
    nn.test = nn.load_data(data.test.X, data.test.Y, data.n_class)
    nn.make_network()
    nn.make_trainer()

    # Apply dimensionality reduction + clustering, then run neural network
    for dr in dim_red:
        for ca in cluster:
            # Print name of algorithms used
            print '\n{} & {}'.format(dr.steps[-1][0], ca.steps[-1][0])

            # Apply dimensionality reduction algorithm to training and test sets.
            if dr.steps[-1][0] != 'LDA':
                train_X = dr.fit_transform(data.train.X)
            else:
                train_X = dr.fit_transform(data.train.X, data.train.Y)
            test_X = dr.transform(data.test.X)

            # Apply clustering to the reduced dimensionality dataset
            if ca.steps[-1][0] == "EM":
                ca.steps[-1][1].fit(train_X)
                C_train = ca.steps[-1][1].predict(train_X)
                C_test = ca.steps[-1][1].predict(test_X)
            else:
                ca.fit(train_X)
                C_train = ca.predict(train_X)
                C_test = ca.predict(test_X)

            # Add cluster assignment as a feature
            train_X = [np.append(x, c) for x, c in zip(train_X, C_train)]
            test_X = [np.append(x, c) for x, c in zip(test_X, C_test)]

            # Build neural network
            nn = ANN()
            nn.train = nn.load_data(train_X, data.train.Y, data.n_class)
            nn.test = nn.load_data(test_X, data.test.Y, data.n_class)
            nn.make_network()
            nn.make_trainer()

            # Run neural network
            for iter in range(max_iter):
                nn.train_network()
                print 'iter: {}  train: {}  test: {}'.format(
                    iter, nn.fitf(), nn.fitf(train=False))

Example 3

    def getAnns():
        builderNoConv = BuilderCNN_MNIST(removeConvLayers=True)

        cnn1 = ANN(BuilderCNN_MNIST(18))
        dnn_huConv = ANN(builderNoConv, PreprocessorHuConv4Dnn(cnn1))
        dnn_conv = ANN(builderNoConv,
                       PreprocessorHuConv4Dnn(cnn1, removeHuPreprocess=True))

        cnn2 = ANN(BuilderCNN_MNIST(18 + 7))
        dnn_conv2 = ANN(builderNoConv,
                        PreprocessorHuConv4Dnn(cnn2, removeHuPreprocess=True))

        return [cnn1, dnn_huConv, dnn_conv, cnn2, dnn_conv2]

Example 4

def exp4(data_class, N=6, max_iter=50):
    """Apply the dimensionality reduction algorithms to one of your datasets
    from assignment #1, then rerun your neural network learner on the newly
    projected data."""

    # Load "clean" dataset
    data = data_class()

    # Set up dimensionality reduction pipelines
    dim_red = dim_red_pipelines(N)

    # Build the neural network without dimensionality reduction
    nn = ANN()
    nn.train = nn.load_data(data.train.X, data.train.Y, data.n_class)
    nn.test = nn.load_data(data.test.X, data.test.Y, data.n_class)
    nn.make_network()
    nn.make_trainer()

    # Train and run the neural network as a baseline
    print 'Baseline neural network (no dimensionality reduction)'
    for iter in range(max_iter):
        nn.train_network()
        print 'iter: {}  train: {}  test: {}'.format(iter, nn.fitf(),
                                                     nn.fitf(train=False))

    # Apply dimensionality reduction and run neural network for all algorithms
    for dr in dim_red:
        # Print the name of the algorithm used
        print '\n{}'.format(dr.steps[-1][0])

        # Apply dimensionality reduction algorithm to training and test sets.
        if dr.steps[-1][0] != 'LDA':
            train_X = dr.fit_transform(data.train.X)
        else:
            train_X = dr.fit_transform(data.train.X, data.train.Y)
        test_X = dr.transform(data.test.X)

        # Build neural network
        nn = ANN()
        nn.train = nn.load_data(train_X, data.train.Y, data.n_class)
        nn.test = nn.load_data(test_X, data.test.Y, data.n_class)
        nn.make_network()
        nn.make_trainer()

        # Run neural network
        for iter in range(max_iter):
            nn.train_network()
            print 'iter: {}  train: {}  test: {}'.format(
                iter, nn.fitf(), nn.fitf(train=False))

Example 5

def ann_bag(training_set, validation_set, num_hidden_units, weight_decay_coeff,
            num_ann_training_iters, num_bagging_training_iters):
    iter_labels = None
    example_weights = np.full((training_set.shape[0], 1),
                              1.0 / len(training_set))
    for i in xrange(0, num_bagging_training_iters):
        print('\nBagging Iteration ' + str(i + 1))
        replicate_set = bootstrap_replicate(training_set, seed_value=i)
        weighted_replicate_set = np.column_stack(
            (example_weights, replicate_set))
        ann = ANN(weighted_replicate_set,
                  validation_set,
                  num_hidden_units,
                  weight_decay_coeff,
                  weighted_examples=True)
        ann.train(num_ann_training_iters, convergence_err=0.5)
        if iter_labels is not None:
            iter_labels = np.column_stack((iter_labels, ann.evaluate()[1]))
        else:
            iter_labels = ann.evaluate()[1]
    voting_labels = np.apply_along_axis(most_common_label, 1, iter_labels)
    assert ann is not None
    actual_labels = ann.validation_labels
    label_pairs = zip(actual_labels, voting_labels)
    accuracy, precision, recall, fpr = evaluate_ann_performance(
        None, label_pairs)
    return accuracy, precision, recall, fpr

Example 6

def main():
    # Fixes numpy's random seed
    np.random.seed(0)

    print(f"Loading train and validate datasets...")
    train_data, train_tags = load_dataset(TRAIN_CSV_PATH)
    validate_data, validate_tags = load_dataset(VALIDATE_CSV_PATH)
    print(f"Loaded datasets successfully")

    ann = ANN()
    ann.add_layer(number_of_neurons=256,
                  activation_function=Relu,
                  input_dim=3072)
    ann.add_layer(number_of_neurons=128, activation_function=Relu)
    ann.add_layer(number_of_neurons=10, activation_function=Softmax)

    create_output_dir(MODELS_DIR)

    print(f"Starting the ANN train process...")
    for i in range(START_EPOCH, EPOCHS + START_EPOCH):
        ann.train(train_data,
                  train_tags,
                  alpha=0.0005,
                  epochs=1,
                  noise_factor=0.8)
        acc_train = ann.evaluate(train_data, train_tags)
        acc_validate = ann.evaluate(validate_data, validate_tags)

        model_file_name = f"{i}_{acc_train * 100:.3f}_{acc_validate * 100:.3f}" + ANN.EXTENSION
        print(
            f"Epoch: {i}, Train accuracy: {acc_train * 100:.3f}, Validate accuracy: {acc_validate * 100:.3f}"
        )
        ann.save(os.path.join(MODELS_DIR, model_file_name))

Example 7

def evaluate(ind):
    ann = ANN(ind)
    error = 0.0
    for i in range(0, inputs.shape[0]):
        out = ann.evaluate(inputs[i])
        error = error + ((out[0] - outputs[i][0]) ** 2) + ((out[1] - outputs[i][1]) ** 2)
    return error,  # fitness is returned as a 1-tuple

Example 8

def analyzeSymbol(symbol):
    startTime = time.time()
    flag = 0
    trainingData = getTrainingData(symbol)

    network = ANN(inNode=3, hiddenNode=3, outNode=1)

    network.training(trainingData)

    for i in range(0, 5):
        # get rolling data for most recent day
        predictionData = getPredictionData(symbol, flag)
        returnPrice = network.test(predictionData)

        # de-normalize and return predicted stock price
        predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                               predictionData[2])

        print predictedStockPrice
        flag += 1
        global new_value
        new_value = predictedStockPrice

    return predictedStockPrice

Example 9

def compare_to_ann():
    """
    Regression problem on two functions (sin, square) with added noise
    using batch learning with a 2 layer neural network
    """

    epochs = 5000
    hidden_neurons = 63  # same number as RBF nodes
    output_neurons = 1

    sin, square = generate_data.sin_square(verbose=verbose)
    sin = add_noise_to_data(sin)
    square = add_noise_to_data(square)

    data = square  # which dataset to use for training and testing

    batch_size = data.train_X.shape[0]  # batch size equal to the number of data points

    ann = ANN(epochs, batch_size, hidden_neurons, output_neurons)

    y_pred = ann.solve(data.train_X, data.train_Y, data.test_X, data.test_Y)

    error = 0.
    for i in range(data.test_Y.shape[0]):
        error += np.abs(data.test_Y[i] - y_pred[i])

    test_error = error / data.test_Y.shape[0]

    print('Test error: ', test_error)

    plotter.plot_2d_function(data.test_X, data.test_Y, y_pred=y_pred)

Example 10

    def __init__(self, input_size, num_hidden_layers, hidden_layer_sizes,
                 output_size, epochs=50, batch_size=1, fit_verbose=2,
                 variables=None, weight_file=''):
        super().__init__()
        self.weight_file = weight_file
        self.model = ANN(input_size, num_hidden_layers, hidden_layer_sizes,
                         output_size, epochs=epochs, batch_size=batch_size,
                         fit_verbose=fit_verbose, variables=variables)

Example 11

def test(ann, images, img_size, S, F, neurons_num):

    ww, bb = ann.get_weights()
    ss = ann.ss[:len(ann.ss) // 2 + 1]  # first half of the layer sizes (integer division keeps the slice index an int)
    flt = ANN(ss, .0)
    ww_size = 0
    bb_size = 0
    for l in range(1, len(ss)):
        ww_size += ss[l - 1] * ss[l]
        bb_size += ss[l]
    flt.set_weights(ww[:ww_size], bb[:bb_size])

    N = images.shape[0]
    ii = list(range(N))  # materialise as a list so it can be shuffled in place
    np.random.shuffle(ii)
    ii = ii[:10]

    to_show = None
    for i in ii:
        tmp = images[i].copy()
        tmp = tmp.reshape((img_size, img_size))

        flt_img = np.zeros((neurons_num, neurons_num))

        r = 0
        c = 0
        for patch in patches(tmp, img_size, S, F):
            p = flt.predict_proba(patch.flatten())

            flt_img[r, c] = p[0, 1]
            c = (c + 1) % neurons_num
            if c == 0:
                r = (r + 1) % neurons_num

        tmp = tmp.reshape((img_size, img_size))

        tmp *= 255
        flt_img *= 255

        flt_img = np.concatenate(
            (flt_img, [[0] * (img_size - neurons_num)] * neurons_num), axis=1)
        flt_img = np.concatenate(
            (flt_img, [[0] * img_size] * (img_size - neurons_num)), axis=0)

        if to_show is None:
            to_show = np.concatenate((tmp, flt_img), axis=1)
        else:
            to_show = np.concatenate(
                (to_show, np.concatenate((tmp, flt_img), axis=1)), axis=0)

    to_show = to_show.astype(np.uint8)

    plot.clf()
    plot.imshow(to_show)
    plot.show()

Example 12

def main(argv):
    argc = len(argv)
    if argc < 2:
        print(get_help())
        exit(0)

    if argv[1] == 't':
        net = ANN(Functions.SIGMOID, Functions.MSE)
        nb = NaiveBayes()
        bn = Bernoulli()
        selected_tweets = reader.read(argv[2])
        rejected_tweets = reader.read(argv[3])
        t1 = threading.Thread(target=train_net,\
                              args=(net, selected_tweets, rejected_tweets))
        t2 = threading.Thread(target=train_nb,\
                              args=(nb, selected_tweets, rejected_tweets))
        t3 = threading.Thread(target=train_bn,\
                              args=(bn, selected_tweets, rejected_tweets))
        t1.start()
        t2.start()
        t3.start()
        t1.join()
        t2.join()
        t3.join()

    elif argv[1] == 'c':
        f_net = open(argv[2], 'rb')
        net = pickle.load(f_net)
        f_net.close()

        f_nb = open(argv[3], 'rb')
        nb = pickle.load(f_nb)
        f_nb.close()

        f_bn = open(argv[4], 'rb')
        bn = pickle.load(f_bn)
        f_bn.close()

        tweets = reader.read(argv[5])

        t1 = threading.Thread(target=classify_using_net,\
                              args=(net, tweets))
        t2 = threading.Thread(target=classify_using_nb,\
                              args=(nb, tweets))
        t3 = threading.Thread(target=classify_using_bn,\
                              args=(bn, tweets))
        t1.start()
        t2.start()
        t3.start()
        t1.join()
        t2.join()
        t3.join()
    else:
        print(get_help())
        exit(0)

Example 13

    def __init__(self, x, y, W, H, food):
        super(Animal, self).__init__(x, y, W, H)

        self.brain = ANN([1, 2])
        """ the orientation of the animal in the environment (range: 0 to 2pi) """
        self.orientation = 0
        """ initialise speed """
        self.speed = ANIMAL_MOVE_SPEED
        """ number of food eaten in this generation by this animal """
        self.num_food = 0

        self.food = food

Example 14

    def train(self, hidden_layers, epochs, learning_rate):
        labels = self.dp_train.labels()
        for label in labels:
            data = self.dp_train.binarizeU(label, upsampled=True)
            X = data[:, 0:-1]
            y_train_logistic = data[:, -1]
            logistic_classifier = ANN(hidden_layers,
                                      epochs,
                                      learning_rate,
                                      verbose=False)
            logistic_classifier.train(X, y_train_logistic)
            self.logistic_classifiers.append(logistic_classifier)

Example 15

    def train(self, hidden_layers, epochs, learning_rate):
        labels = self.dp_train.labels()
        self.labels_combination = list(itertools.combinations(labels, 2))
        for labels in self.labels_combination:
            data = self.dp_train.binarize(labels)
            X = data[:, 0:-1]
            y = data[:, -1]
            logistic_classifier = ANN(hidden_layers,
                                      epochs,
                                      learning_rate,
                                      verbose=False)
            logistic_classifier.train(X, y)
            self.logistic_classifiers.append(logistic_classifier)

Example 16

    def __init__(self, input_size, hidden_layers, output_size):

        # network input
        self.X = tf.placeholder(tf.float32, [None, input_size], name='X')

        # encoders and decoder are just fully connected networks
        self.encoder = ANN(input_size, hidden_layers, output_size)
        M = output_size // 2
        self.decoder = ANN(M, hidden_layers[::-1], input_size)

        # Construct the sampling distribution from the output of the encoder
        self.encoder_out = self.encoder.forward(self.X)
        self.means = self.encoder_out[:, :M]
        self.stddev = tf.nn.softplus(self.encoder_out[:, M:]) + 1e-6

        with st.value_type(st.SampleValue()):
            self.Z = st.StochasticTensor(
                Normal(loc=self.means, scale=self.stddev))

        # network output
        self.logits = self.decoder.forward(self.Z)
        self.pX = Bernoulli(logits=self.logits)

        # Standard normal prior on the latent code (used in the KL term below)
        standard_normal = Normal(loc=np.zeros(M, dtype=np.float32),
                                 scale=np.ones(M, dtype=np.float32))

        # initialize cost and training
        kl = tf.reduce_sum(
            tf.contrib.distributions.kl_divergence(self.Z.distribution,
                                                   standard_normal), 1)

        expected_log_likelihood = tf.reduce_sum(self.pX.log_prob(self.X), 1)

        self.elbo = tf.reduce_sum(expected_log_likelihood - kl)
        self.train_op = tf.train.RMSPropOptimizer(
            learning_rate=0.001).minimize(-self.elbo)

        self.X_hat = self.pX.sample()

Example 17

def main():
    if len(sys.argv) != 4:
        print("Invalid number of arguments.")
        print(
            f"Expected usage: python {sys.argv[0]} train_images train_labels validation_images"
        )
        return

    test_label_path = None
    history_images = None
    history_labels = None
    history_accuracy = None
    if devel:
        test_label_path = "validation-labels.txt"

    (train_images, train_labels, test_images,
     test_labels) = load_data(sys.argv[1], sys.argv[2], sys.argv[3],
                              test_label_path)
    train_labels_ohv = labels_to_1_hot(train_labels)

    if devel:
        test_labels_ohv = labels_to_1_hot(test_labels)
        history_images = test_images
        history_labels = test_labels_ohv
        history_accuracy = calculate_accuracy

    network = ANN(784, 'mean_square_error', regularization='L2', lambd=0.01)
    network.add_layer(10, 'leaky_relu')
    network.add_layer(10, 'leaky_relu')
    network.add_layer(10, 'tanh')

    (train_loss, test_loss, train_acc,
     test_acc) = network.train(train_images, train_labels_ohv, 40, 10, 0.1,
                               0.1 / 40, devel, history_images, history_labels,
                               history_accuracy)

    test_out = network.eval(test_images)

    if not devel:
        labels = test_out.argmax(1)
        for i in labels:
            print(f"{i}")
    else:
        xaxis = [x for x in range(train_loss.size)]
        plt.figure(1)
        plt.plot(xaxis, train_loss, xaxis, test_loss)
        plt.legend(("Training loss", "Test loss"))
        plt.figure(2)
        line = plt.plot(xaxis, train_acc, xaxis, test_acc)
        plt.legend(("Training accuracy", "Test accuracy"))
        plt.show()

Example 18

def main():
    nn = ANN([5, 5, 1], Utils.linear, Utils.linear_derivative)
    u, t = Utils.readData()

    for i in range(100):
        for j in range(len(u)):
            nn.backPropag(nn.computeLoss(u[j], t[j]), 0.01)

    # compute the errors
    diff = []
    for i in range(len(u)):
        predicted = nn.feedForward(u[i])
        diff.append(abs(predicted[0] - t[i][0]))
        print("Actual: {}, Predicted:{}".format(t[i][0], predicted[0]))

    print("Mean of errors: {}".format(mean(diff)))

Example 19

def train(mode='new', train_times=100, lr=0.1, **kwargs):
    global data_len

    if mode in ['new', 'continue']:
        text = load_data()
        data = sample_data(text)
        print data[0].shape
        print data[1].shape
        l = int(data_size * 0.7)
        datasets = [(shared(data[0][:l]), shared(data[1][:l])),
                    (shared(data[0][l:]), shared(data[1][l:]))]

        #ann
        theano.config.exception_verbosity = 'high'
        theano.config.on_unused_input = 'ignore'

    if mode == 'new':
        cl = ANN(data_len * alphanum,
                 alphanum,
                 hiddens=[300, 300, 200],
                 lmbd=0)
        cl.fit(datasets,
               lr=theano.tensor.cast(lr, theano.config.floatX),
               n_epochs=train_times,
               batch_size=200)

        dump(cl, open('save.dat', 'wb'))
    elif mode == 'continue':
        try:
            os.rename('save.dat', 'origin.dat')
        except OSError:
            pass
        cl = load(open('origin.dat', 'rb'))
        print cl
        cl.fit(datasets,
               lr=theano.tensor.cast(lr, theano.config.floatX),
               n_epochs=train_times,
               batch_size=200)

        dump(cl, open('save.dat', 'wb'))

    elif mode == 'create':
        cl = load(open('origin.dat', 'rb'))
        create(**kwargs)

    return cl

Example 20

    def run(self, lambd=0, keep_prob=1):

        train_X, train_Y, test_X, test_Y = data_service.load_2D_dataset()

        ann = ANN()
        learning_rate = 0.3
        parameters, costs = ann.fit(train_X,
                                    train_Y,
                                    learning_rate=learning_rate,
                                    lambd=lambd,
                                    keep_prob=keep_prob)

        plotting_service.plot_loss_per_iteration_for_learning_rate(
            costs, learning_rate)

        plotting_service.plot_decision_boundary(
            lambda x: ann_service.predict_dec(parameters, x.T), train_X,
            train_Y)

Example 21

def main():
    datasets = getDatasets()

    # Initialize NN instance
    nn = ANN(
        dims=(input_dim, 256, output_dim),
        activation='sigmoid',
        plot=draw_plots,
    )

    nn.train(x_m=datasets['train_x_m'],
             y_m=datasets['train_y_m'],
             learning_rate=0.5,
             max_batch_size=1,
             momentum_factor=0.1,
             max_epochs=5)

    nn.test(x_m=datasets['test_x_m'], y_m=datasets['test_y_m'])

Example 22

def ann_boost(training_set, validation_set, num_hidden_units,
              weight_decay_coeff, num_ann_training_iters,
              num_boosting_training_iters):
    # Add a column to the front of the example matrix containing the initial weight for each example
    example_weights = np.full((training_set.shape[0], 1),
                              1.0 / len(training_set))
    anns = []
    alphas = []
    for i in xrange(0, num_boosting_training_iters):
        print('\nBoosting Iteration ' + str(i + 1))
        weighted_training_set = np.column_stack(
            (example_weights, training_set))
        ann = ANN(weighted_training_set,
                  validation_set,
                  num_hidden_units,
                  weight_decay_coeff,
                  weighted_examples=True)
        ann.train(num_ann_training_iters, convergence_err=0.5, min_iters=1)
        actual_labels = ann.training_labels
        assigned_labels = ann.output_labels
        error = weighted_training_error(example_weights, actual_labels,
                                        assigned_labels)
        alpha = classifier_weight(error)
        print('\n\talpha: ' + str(alpha))
        if alpha == float('inf'):
            alphas = [float('inf')]
            anns = [ann]
            break
        anns.append(ann)
        alphas.append(alpha)
        if alpha != 0.0:
            example_weights = update_example_weights(example_weights, alpha,
                                                     actual_labels,
                                                     assigned_labels)
        else:
            break
    alphas = np.array(alphas)
    vote_labels = weighted_vote_labels(anns, alphas)
    assert ann is not None
    actual_labels = ann.validation_labels
    label_pairs = zip(actual_labels, vote_labels)
    accuracy, precision, recall, fpr = evaluate_ann_performance(
        None, label_pairs)
    return accuracy, precision, recall, fpr

Example 23

def run_model(which='all'):
    if which in ['ann', 'all', 'main', 'standard']:
        model = ANN(emb_size, vocab_size, hid_dim, hid_num, class_num,
                    sent_len).cuda()
        ann_loss = train(model, x, target, ann=True)
        plt.plot(ann_loss, label='ann')
    if which in ['wann', 'all', 'standard']:
        model = WANN(emb_size, vocab_size, hid_dim, hid_num, class_num,
                     sent_len).cuda()
        wann_loss = train(model, x, target, ann=True)
        plt.plot(wann_loss, label='wann')
    if which in ['rnn', 'all', 'main']:
        model = RNN(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        rnn_loss = train(model, x, target)
        plt.plot(rnn_loss, label='rnn')
    if which in ['exrnn', 'all']:
        model = EXRNN(emb_size, vocab_size, hid_dim, hid_num, class_num, 2000,
                      2000).cuda()
        exrnn_loss = train(model, x, target)
        plt.plot(exrnn_loss, label='exrnn')
    if which in ['exmem', 'all']:
        model = EXRNN(emb_size,
                      vocab_size,
                      hid_dim,
                      hid_num,
                      class_num,
                      2000,
                      forget_dim=None).cuda()
        exmem_loss = train(model, x, target)
        plt.plot(exmem_loss, label='exmem')
    if which in ['lstm', 'all', 'main']:
        model = LSTM(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        lstm_loss = train(model, x, target)
        plt.plot(lstm_loss, label='lstm')
    if which in ['gru', 'all', 'main']:
        model = GRU(emb_size, vocab_size, hid_dim, hid_num, class_num).cuda()
        gru_loss = train(model, x, target)
        plt.plot(gru_loss, label='gru')
    # plt.ylim([0, 2])
    plt.legend()
    plt.grid(True)
    plt.show()

Example 24

def train_ANN_PSO(inputs,
                  res_ex,
                  max_iter,
                  n_particle,
                  n_neighbor,
                  nb_h_layers,
                  nb_neurons_layer,
                  min_bound,
                  max_bound,
                  cognitive_trust,
                  social_trust,
                  inertia_start,
                  inertia_end,
                  velocity_max,
                  activations,
                  draw_graph=False):
    nb_neurons = set_nb_neurons(len(inputs[0]), nb_neurons_layer, nb_h_layers)
    # print(nb_neurons, n_neighbor, activations)
    ann = ANN(nb_neurons=nb_neurons,
              nb_layers=len(nb_neurons),
              activations=activations)
    dim = sum(nb_neurons[i] * nb_neurons[i + 1]
              for i in range(len(nb_neurons) - 1)) + len(nb_neurons) - 1
    pso = PSO(dim,
              lambda params: fitness_for_ann(params, ann, inputs, res_ex),
              max_iter=max_iter,
              n_particle=n_particle,
              n_neighbor=n_neighbor,
              comparator=minimise,
              min_bound=min_bound,
              max_bound=max_bound,
              cognitive_trust=cognitive_trust,
              social_trust=social_trust,
              inertia_start=inertia_start,
              inertia_end=inertia_end,
              velocity_max=velocity_max,
              endl='',
              version=2007)
    if draw_graph:
        pso.set_graph_config(inputs=inputs, res_ex=res_ex)
    pso.run()
    return pso, ann

Example 25

def bias_influence(X,Y):
    '''
    Illustrate the capabilities of the model without the addition of a bias term,
    using batch learning and the delta rule.
    :param X: the input data (N (number of inputs) x M (number of features before bias))
    :param Y: the output targets (1 x N)
    '''
    verbose = False
    params = {
        "learning_rate": 0.1,
        "batch_size": 1,
        "theta": 0,
        "epsilon": 0.0,  # slack for error during training
        "epochs": 50,
        "act_fun": 'step',
        "test_data": None,
        "test_targets": None,
        "m_weights": 0,
        "sigma_weights": 0.5,
        "nodes": 1,
        "learn_method": 'delta_rule',
        "bias": 0
    }

    training_method = 'sequential'  # 'batch' , 'sequential'

    ann = ANN(X, Y, **params)

    ann.train(training_method, verbose=verbose)

    if (params["bias"] == 0):
        title = 'Learning without bias'
    else:
        title = 'Learning with bias'

    ann.plot_decision_boundary(
            data=ann.train_data,
            plot_intermediate=True,
            title=title,
            data_coloring = ann.train_targets,
            origin_grid=True
            )

Example 26

    def __init__(self):
        # load the layers of sketch-a-net with pretrained weights
        self.layers = load_layers('./data/model_without_order_info_224.mat')

        # load the corpora of mark matches at layers 2 and 4:
        # layer 2 matches low-level marks like lines, and
        # layer 4 matches higher-level marks like closed forms.
        with open('./data/corpus' + file_extension + '4.txt', 'rb') as fp:
            imgs, acts = pickle.load(fp)
            self.imgs4 = imgs
        with open('./data/corpus' + file_extension + '2.txt', 'rb') as fp:
            imgs, acts = pickle.load(fp)
            self.imgs2 = imgs

        # init the approximate nearest neighbors class for mark matching
        # the results from this can be fetched from self.imgs*
        self.ANN = ANN()

        # the layers for this repeater from sketch-a-net
        self.layer_names = ['conv1', 'conv2', 'conv3', 'conv4']

Example 27

def perform_ova_single_nn():
    ann = ANN((100, ), 5000, 0.01, verbose=False)

    parameter_space = {
        'hidden_layer_sizes': [(50, ), (100, ), (150, ), (200, ), (50, 50),
                               (50, 100), (100, 50), (200, 50)],
        'learning_rate_init': [0.005, 0.01, 0.015, 0.02, 0.025],
        'max_iter': [2000, 3000, 4000, 5000]
    }
    grid = GridSearchCV(ann.mlp, parameter_space, n_jobs=-1, cv=3)
    grid.fit(x_train, y_train)
    print('Best parameters found:\n', grid.best_params_)
    print('Results on the test set:\n',
          classification_report(y_test, grid.predict(x_test)))
    # print('Results on the test set:\n', confusion_matrix(y_test, grid.predict(x_test)))

    means = grid.cv_results_['mean_test_score']
    stds = grid.cv_results_['std_test_score']
    params = grid.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print("%f (%f) with: %r" % (mean, stdev, param))

Example 28

def analyzeSymbol(stockSymbol):
    startTime = time.time()

    trainingData = getTrainingData(stockSymbol)

    network = ANN(inNode=3, hiddenNode=3, outNode=1)

    network.training(trainingData)

    # get rolling data for most recent day
    predictionData = getPredictionData(stockSymbol)

    # get prediction
    returnPrice = network.test(predictionData)

    # de-normalize and return predicted stock price
    predictedStockPrice = denormalizePrice(returnPrice, predictionData[1],
                                           predictionData[2])

    # create return object, including the amount of time used to predict

    return predictedStockPrice

Example 29

def compare_learning_rate(X, Y):
    """
    This function studies the convergence while varying the learning rate
    :param X: Training inputs
    :param Y: Training targets
    :return: plot of the evolution of the error along the iterations
    for different values of the learning rate

    For this function, the update rule and the method (batch/sequential) have to be changed
    manually in params "learn_method" and in the training function, respectively.

    """

    fig, ax = plt.subplots()
    eta = np.linspace(0.0005, 0.0015, 5)
    for e in eta:
        params = {
            "learning_rate": e,
            "batch_size": N,
            "theta": 0,
            "epsilon": -0.1,  # slack for error during training
            "epochs": 10,
            "act_fun": 'step',
            "m_weights": 0.9,
            "sigma_weights": 0.9,
            "nodes": 1,
            "learn_method": 'delta_rule' #'delta_rule'
        }

        training_method = 'sequential'  # 'batch' , 'sequential'

        ann = ANN(X, Y, **params)

        ann.train(training_method, verbose=verbose)

        ax.plot(range(len(ann.error_history)), ann.error_history,
                label=r'$\eta = {}$'.format(e))
        #ax.set_xlim(0, 40)
    ax.legend()
    plt.show()

Example 30

    def __init__(self, population):
        '''
        Create game AI.

        :param population: Number of AI to evolve.
        '''
        print("Creating game AI.")
        self.genomes = list()
        self.anns = list()
        self.generation = 1
        self.last_avg_fit = 0.0

        index = 0
        while population > 0:
            print("AI's left: " + str(population))
            ann = ANN()
            self.anns.append(ann)
            # Names will get strange if lots of brains are created
            self.genomes.append(
                FloatGenome(ann.get_internal_data(), 0.0,
                            chr(ord('a') + index)))
            population -= 1
            index += 1