def main():
    """
    Run the XOR test case
    """

    print("...Reading dataset")
    dataset = load_dataset("datasets/xor.dat")
    print("...done!")

    print("...Spliting the dataset")
    training_samples, testing_samples, labels_train, labels_test = split_dataset(
        dataset)
    print("...done!")

    print("...Creating the classifier")
    clf = MLP(input_layer=2, hidden=2, output=1)
    print("...done!")

    print("...Fiting the clf")
    clf.fit(training_samples, labels_train, verbose_error=True)
    print("...done!")

    print("...Made a prediction")
    pred = clf.predict(testing_samples)
    print("...done!")

    print('Converged with MSE: {}'.format(clf.error))

    print(clf)

    # pd.DataFrame.from_items was removed in pandas 1.0; a plain dict keeps column order
    print(
        pd.DataFrame({'Expected': labels_test,
                      'Obtained': pred}))

    clf.plot_errors()
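
The script above relies on two helpers that are not shown on this page. A minimal sketch of what they might look like, assuming xor.dat stores one sample per row as two whitespace-separated inputs followed by a label (the file layout, split ratio, and seed are assumptions, not from the source):

# Hypothetical helpers for the XOR script above; the xor.dat layout
# (two feature columns, then one label column) is an assumption.
import numpy as np
from sklearn.model_selection import train_test_split

def load_dataset(path):
    # One sample per row: x1 x2 label
    return np.loadtxt(path)

def split_dataset(dataset, test_size=0.25, seed=42):
    X, y = dataset[:, :2], dataset[:, 2]
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=test_size, random_state=seed)
    return X_train, X_test, y_train, y_test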
Example n. 2
def main(argv):
    argv = FLAGS(argv)
    inputs, outputs = load_CIFAR_train(FLAGS.datapath)
    X_test, y_test = load_CIFAR_test(FLAGS.datapath)
    nn = MLP(3072, FLAGS.hidden_dim, 10, FLAGS.activation, FLAGS.loss_type, FLAGS.layer_num)
    nn.fit(inputs, outputs, FLAGS.epoch, FLAGS.batch, [FLAGS.lr_W, FLAGS.lr_b],
            X_test, y_test)
    print(nn.test(X_test, y_test))
Example n. 3
def experiment_train_val_seq_batch_mlp():
    use_validation_set = False
    case = 1

    [inputs, inputs_labels, input_validation,
     input_validation_labels] = Utils.create_non_linearly_separable_data_2(
         use_validation_set=use_validation_set, case=case)

    num_hidden_nodes_layer_1 = 20
    num_iterations = 1000
    learning_rate = 0.002
    verbose = False

    mlp_batch = MLP(inputs=inputs,
                    inputs_labels=inputs_labels,
                    input_validation=input_validation,
                    input_validation_labels=input_validation_labels,
                    num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                    num_iterations=num_iterations,
                    learning_rate=learning_rate,
                    batch_train=True,
                    verbose=verbose)

    [_, _, mse_batch] = mlp_batch.fit()
    train_batch_mse_batch = mlp_batch.mse
    eval_batch_mse_batch = mlp_batch.validation_mse

    Utils.plot_decision_boundary_mlp(
        inputs, inputs_labels, mlp_batch,
        'MLP with learning rate {0}, iterations {1} , num hidden nodes {2}'.
        format(learning_rate, num_iterations, num_hidden_nodes_layer_1))

    mlp_seq = MLP(inputs=inputs,
                  inputs_labels=inputs_labels,
                  input_validation=input_validation,
                  input_validation_labels=input_validation_labels,
                  num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                  num_iterations=num_iterations,
                  learning_rate=learning_rate,
                  batch_train=False,
                  verbose=verbose)

    [_, _, mse_seq] = mlp_seq.fit()
    train_seq_mse_batch = mlp_seq.mse
    eval_seq_mse_batch = mlp_seq.validation_mse

    mse = [
        train_batch_mse_batch, train_seq_mse_batch, eval_batch_mse_batch,
        eval_seq_mse_batch
    ]
    legend_names = ['train batch', 'train seq', 'eval batch', 'eval seq']
    Utils.plot_error_with_epochs(
        mse,
        legend_names=legend_names,
        num_epochs=num_iterations,
        title='MLP with lr = {0}, iterations = {1} , hidden nodes = {2} '.
        format(learning_rate, num_iterations, num_hidden_nodes_layer_1))
Example n. 4
def runTest(self):
    inputs = np.array([[1, 0], [0, 1], [1, 1], [0, 0]], dtype=util.FLOAT).T
    outputs = np.array([[0, 1], [0, 1], [1, 0], [1, 0]], dtype=util.FLOAT).T
    mlp = MLP([2, 5, 2], Lambda=0.0001, alpha=.1, activation="sigmoid", costType="mse")
    mlp.fit(inputs, outputs, 20000, 4)
    mlp_result, mlp_prediction = mlp.predict(inputs, outputs)
    loss = np.mean((mlp_result - outputs) ** 2)
    print("Prediction: ", mlp_result)
    print("Loss: ", loss)
    np.testing.assert_almost_equal(loss, 0, 2)
Example n. 5
def test_MLP_Layer_size(X, Y, Z, num_hidden_nodes_layer_1):
    targets = np.reshape(Z, (1, (len(X) * len(Y)))).T
    n_X = len(X)
    n_Y = len(Y)

    num_data = n_X * n_Y

    xx = np.reshape(X, (1, (num_data)))
    yy = np.reshape(Y, (1, (num_data)))

    patterns = np.vstack((xx, yy)).T

    num_iterations = 500

    learning_rate = 0.01
    verbose = False
    X_train, X_test, y_train, y_test = train_test_split(patterns,
                                                        targets,
                                                        test_size=0.2,
                                                        random_state=42)
    X_train, X_test = X_train.T, X_test.T
    MSEs = []
    Models = []
    for layers in num_hidden_nodes_layer_1:
        mlp_batch = MLP(inputs=X_train,
                        inputs_labels=y_train,
                        num_nodes_hidden_layer=layers,
                        num_iterations=num_iterations,
                        learning_rate=learning_rate,
                        batch_train=True,
                        verbose=verbose,
                        binary=False,
                        num_output_layers=1)

        mlp_batch.fit()

        o_out = mlp_batch.predict(patterns.T)

        # print(o_out.shape)

        Z = np.reshape(o_out, (n_X, n_Y))

        [_, mse] = Utils.compute_error(targets, o_out, False)

        MSEs.append(mse)
        Models.append(mlp_batch)
        title = 'Number of hidden nodes: {0} and MSE: {1}'.format(
            layers, round(mse, 4))
        Utils.plot_3d_data(X, Y, Z, title)

    return MSEs, Models
Example n. 6
def check_MlP_test_sizes(X, Y, Z):
    targets = np.reshape(Z, (1, (len(X) * len(Y)))).T
    n_X = len(X)
    n_Y = len(Y)

    num_data = n_X * n_Y

    xx = np.reshape(X, (1, (num_data)))
    yy = np.reshape(Y, (1, (num_data)))

    patterns = np.vstack((xx, yy)).T

    num_hidden_nodes_layer_1 = 20
    num_iterations = 5000

    learning_rate = 0.001
    verbose = False
    train_test = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
    MSEs = []
    Models = []
    for check in train_test:
        X_train, X_test, y_train, y_test = train_test_split(patterns,
                                                            targets,
                                                            test_size=check,
                                                            random_state=42)
        X_train, X_test = X_train.T, X_test.T
        print(X_train.shape)
        mlp_batch = MLP(inputs=X_train,
                        inputs_labels=y_train,
                        num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                        num_iterations=num_iterations,
                        learning_rate=learning_rate,
                        batch_train=True,
                        verbose=verbose,
                        binary=False,
                        num_output_layers=1)

        mlp_batch.fit()

        o_out = mlp_batch.predict(patterns.T)

        Z = np.reshape(o_out, (n_X, n_Y))

        [_, mse] = Utils.compute_error(targets, o_out, False)

        MSEs.append(mse)
        Models.append(mlp_batch)
        title = 'MSE: {0}'.format(round(mse, 4))
        # Utils.plot_3d_data(X, Y, Z, title)

    return MSEs, Models
Example n. 7
class Classifier:
    def __init__(self):
        self.mlp = None

    def predict_input_img(self, img):
        plt.imshow(img, cmap="hot", interpolation="nearest")
        plt.show()
        preprocessing_normalized = preprocessing(
            dataset=tf.keras.datasets.mnist.load_data(),
            symetric_dataset=True,
            dimension_reduction=False,
            data_whitening=False,
            normalize=True,
        )
        X_train, y_train, X_test, y_test = preprocessing_normalized.preprocess_mnist(
            [img]
        )
        if self.mlp is None:
            print("Training the model.\nThis will only happen once.")
            self.mlp = MLP(ReLU, X_train.shape[1], [128, 128])
            gen = self.mlp.fit(X_train, y_train, 25, 50)
            ls = list(gen)
        pred = self.mlp.predict(X_test)
        print(f"Our MultiLayer Perceptron thinks the digit is: {pred[0]}")
        ascii_banner = pyfiglet.figlet_format(str(pred[0]))
        print(ascii_banner)
Example n. 8
def runTest(self):
    MNIST_DIR = "../Data/mnist.pkl.gz"
    f = gzip.open(MNIST_DIR, "rb")
    # encoding="latin1" lets Python 3 read the Python 2 MNIST pickle
    train_set, valid_set, test_set = pickle.load(f, encoding="latin1")
    f.close()
    X_train, y_train = translate(train_set)
    X_test, y_test = translate(test_set)
    mlp = MLP([784, 800, 300, 10], 0, 0.2, "sigmoid", "mse", load="True")
    mlp.fit(X_train, y_train, 100, 500000)
    mlp_result, mlp_prediction = mlp.predict(X_test, y_test)
    mlp_result, mlp_prediction = mlp.predict(X_train, y_train)
    loss = np.mean((mlp_result - y_train) ** 2)
    print("Loss: ", loss)
    error = sum(mlp_prediction[i] != train_set[1][i] for i in range(len(mlp_prediction)))
    error /= float(len(mlp_prediction))
    print("Error: ", error)
    self.assertTrue(error < .1)
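
translate is not shown on this page. Given the 10-unit output layer, the column-wise sample layout used in Example n. 4, and the squared error computed directly against y_train, it presumably maps a (features, labels) pair to transposed features plus one-hot targets. A guess, not the original:

# Hypothetical translate() for the MNIST test above; the exact layout
# (samples as columns, one-hot rows) is an assumption.
import numpy as np

def translate(dataset):
    X, labels = dataset
    Y = np.zeros((10, len(labels)))
    Y[labels, np.arange(len(labels))] = 1.0
    return X.T, Y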
Example n. 9
def experiment_learning_curves_error():
    train_test = [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]

    use_validation_set = True
    num_hidden_nodes_layer_1 = 20
    num_iterations = 1000
    learning_rate = 0.001
    verbose = False

    cases = [1, 2, 3, 4]
    train_MSE = []
    val_MSE = []
    for case in cases:
        [inputs, inputs_labels, input_validation,
         input_validation_labels] = Utils.create_non_linearly_separable_data_2(
             use_validation_set=use_validation_set, case=case)

        print(case)

        current_train = []
        current_validation = []
        for check in train_test:
            X_train, X_test, y_train, y_test = train_test_split(
                inputs.T, inputs_labels, test_size=check, random_state=42)

            mlp_batch = MLP(inputs=X_train.T,
                            inputs_labels=y_train,
                            input_validation=input_validation,
                            input_validation_labels=input_validation_labels,
                            num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                            num_iterations=num_iterations,
                            learning_rate=learning_rate,
                            batch_train=True,
                            verbose=verbose)

            [_, _, mse_batch] = mlp_batch.fit()

            current_train.append(mlp_batch.mse[-1])
            current_validation.append(mlp_batch.validation_mse[-1])

        train_MSE.append(current_train)
        val_MSE.append(current_validation)

    legend_names = [
        'train mse error case 1', 'train mse error case 2',
        'train mse error case 3', 'train mse error case 4',
        'validation mse error case 1', 'validation mse error case 2',
        'validation mse error case 3', 'validation mse error case 4'
    ]

    Utils.plot_learning_curves(
        train_MSE,
        legend_names=legend_names,
        train_size=train_test,
        title='Learning curve with lr = {0}, iterations = {1} '.format(
            learning_rate, num_iterations),
        loss=val_MSE)
Example n. 10
def main():
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    targets = np.array([0, 1, 1, 0])

    # initialize
    mlp = MLP(n_input_units=2, n_hidden_units=3, n_output_units=1)
    mlp.print_configuration()

    # training
    mlp.fit(inputs, targets)
    print('--- training ---')
    print('first layer weight: ')
    print(mlp.v)
    print('second layer weight: ')
    print(mlp.w)

    # predict
    print('--- predict ---')
    for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(i, mlp.predict(i))
Example n. 12
def main():
    digits = load_digits()
    X = digits.data
    y = digits.target
    X -= X.min()
    X /= X.max()

    mlp = MLP(64, 100, 10)
    mlp.print_configuration()

    X_train, X_test, y_train, y_test = train_test_split(X, y)
    labels_train = LabelBinarizer().fit_transform(y_train)
    labels_test = LabelBinarizer().fit_transform(y_test)

    mlp.fit(X_train, labels_train)
    predictions = []
    for i in range(X_test.shape[0]):
        o = mlp.predict(X_test[i])
        predictions.append(np.argmax(o))
    print(confusion_matrix(y_test, predictions))
    print(classification_report(y_test, predictions))
Example n. 13
def experiment_train_validation_nodes():
    use_validation_set = True

    num_iterations = 1000
    learning_rate = 0.002
    verbose = False

    nodes = [1, 5, 10, 20, 25]
    cases = [1, 2, 3, 4]
    train_MSE = []
    val_MSE = []

    for case in cases:
        print(case)
        [inputs, inputs_labels, input_validation,
         input_validation_labels] = Utils.create_non_linearly_separable_data_2(
             use_validation_set=use_validation_set, case=case)

        current_mse = []
        current_val_mse = []
        for node in nodes:
            mlp_batch = MLP(inputs=inputs,
                            inputs_labels=inputs_labels,
                            input_validation=input_validation,
                            input_validation_labels=input_validation_labels,
                            num_nodes_hidden_layer=node,
                            num_iterations=num_iterations,
                            learning_rate=learning_rate,
                            batch_train=True,
                            verbose=verbose)

            [_, _, mse_batch] = mlp_batch.fit()

            current_mse.append(mlp_batch.mse[-1])
            current_val_mse.append(mlp_batch.validation_mse[-1])

        train_MSE.append(current_mse)
        val_MSE.append(current_val_mse)

    legend_names = [
        'train mse error case 1', 'train mse error case 2',
        'train mse error case 3', 'train mse error case 4',
        'validation mse error case 1', 'validation mse error case 2',
        'validation mse error case 3', 'validation mse error case 4'
    ]

    Utils.plot_error_hidden_nodes(
        train_MSE,
        legend_names=legend_names,
        hidden_nodes=nodes,
        title='MLP with learning rate {0}, iterations {1} '.format(
            learning_rate, num_iterations),
        loss=val_MSE)
Example n. 14
def main():
    inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    targets = np.array([0, 1, 1, 0])

    # initialize
    mlp = MLP(n_input_units=2, n_hidden_units=3, n_output_units=1)
    mlp.print_configuration()

    # training
    mlp.fit(inputs, targets)
    print('Training in progress')
    print('First layer weights: ')
    print(mlp.v)
    print('Second layer weights: ')
    print(mlp.w)

    # predict
    print('Predicting ... ')
    for i in [[0, 0], [0, 1], [1, 0], [1, 1]]:
        print(i, mlp.predict(i))
Example n. 15
def experiment_train_validation_error():
    use_validation_set = True
    num_hidden_nodes_layer_1 = 20
    num_iterations = 1000
    learning_rate = 0.002
    verbose = False

    cases = [1, 2, 3, 4]
    mse = []
    for case in cases:
        [inputs, inputs_labels, input_validation,
         input_validation_labels] = Utils.create_non_linearly_separable_data_2(
             use_validation_set=use_validation_set, case=case)

        # Utils.plot_initial_data(inputs.T, inputs_labels)

        mlp_batch = MLP(inputs=inputs,
                        inputs_labels=inputs_labels,
                        input_validation=input_validation,
                        input_validation_labels=input_validation_labels,
                        num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                        num_iterations=num_iterations,
                        learning_rate=learning_rate,
                        batch_train=True,
                        verbose=verbose)

        [_, _, mse_batch] = mlp_batch.fit()

        mse.append(mlp_batch.mse)
        mse.append(mlp_batch.validation_mse)

    legend_names = [
        'train mse error case 1', 'validation mse error case 1',
        'train mse error case 2', 'validation mse error case 2',
        'train mse error case 3', 'validation mse error case 3',
        'train mse error case 4', 'validation mse error case 4'
    ]

    Utils.plot_error_with_epochs(
        mse,
        legend_names=legend_names,
        num_epochs=num_iterations,
        title='MLP with lr = {0}, iterations = {1} , hidden nodes = {2} '.
        format(learning_rate, num_iterations, num_hidden_nodes_layer_1))
Example n. 16
def run_hidden_nodes_mse_plot_experiment():
    use_validation_set = False

    [inputs, inputs_labels, input_validation,
     input_validation_labels] = Utils.create_non_linearly_separable_data_2(
         use_validation_set=use_validation_set)

    Utils.plot_initial_data(inputs.T, inputs_labels)

    num_iterations = 1000
    learning_rate = 0.002
    verbose = True

    nodes = np.arange(1, 50, 1)
    losses = []
    mses = []
    for node in nodes:
        mlp_batch = MLP(inputs=inputs,
                        inputs_labels=inputs_labels,
                        input_validation=input_validation,
                        input_validation_labels=input_validation_labels,
                        num_nodes_hidden_layer=node,
                        num_iterations=num_iterations,
                        learning_rate=learning_rate,
                        batch_train=True,
                        verbose=verbose)

        [_, _, mse_batch] = mlp_batch.fit()
        out = mlp_batch.predict(inputs)
        [loss, mse] = mlp_batch.evaluate(out, inputs_labels)
        losses.append(loss)
        mses.append(mse)

    legend_names = ['mse', 'misclassification']
    Utils.plot_error_hidden_nodes(
        mses,
        legend_names=legend_names,
        hidden_nodes=nodes,
        title='MLP with learning rate {0}, iterations {1} '.format(
            learning_rate, num_iterations),
        loss=losses)
Example n. 17
# Ziping Chen
# March 2020

from MLP import MLP
from config import net_conf
from data import get_data

# get training data
x_train, y_train, x_test, y_test = get_data('mnist')

# define model
model = MLP(net_conf['architecture'])
# print model summary
model.summary()
# (1) fit model with split validation
model.fit(x_train,
        y_train,
        net_conf['training'],
        valid=1/6)
# (2) fit model and validate with test set
# model.fit(x_train,
#         y_train,
#         net_conf['training'],
#         test_x=x_test,
#         test_y=y_test)
# plot learning curve with accuracy
model.plot()
# evaluate dataset
# model.evaluate(your_dataset)
# save model
# model.save('your_path.h5py')
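
config.net_conf is not shown; from the two keys used above it is presumably a dict with an 'architecture' entry (layer sizes) and a 'training' entry (optimizer settings). A sketch with illustrative values only:

# Hypothetical net_conf matching the keys used above; all values are guesses.
net_conf = {
    'architecture': [784, 128, 64, 10],  # MNIST input, two hidden layers, 10 classes
    'training': {'epochs': 20, 'batch_size': 64, 'learning_rate': 0.01},
}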
Example n. 18
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=2015)

# Normalize data
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)

# prepare data: transpose x's and y's, and binary-ize the y's
x_train = x_train.T
x_test = x_test.T
y_train = np.vstack((1 - y_train, y_train))
y_test = np.vstack((1 - y_test, y_test))

logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S')
mlp = MLP(num_hidden_units=50, alpha=.01, lambda_penalty=0.0001, activation="tanh", random_seed=1234)
mlp.fit(x_train, y_train, epochs=50, batch_size=10000, stall_limit=100, pct_validation=0.1)

probs_train = mlp.predict_proba(x_train)
loss = np.mean((probs_train - y_train)**2)
print "Training Loss: ", loss

probs_test = mlp.predict_proba(x_test)
loss = np.mean((probs_test - y_test)**2)
print "Test Loss: ", loss

class_scores = mlp.score_classes(x_train, y_train)
print("Train class scores:", class_scores)
class_scores = mlp.score_classes(x_test, y_test)
print("Test class scores:", class_scores)

print("Train Score")
Example n. 19
def run_perceptron(batch_size, bias, epoch, function, layers, learning_rate,
                   momentum, list_of_paths_to_data, rng, suffix,
                   classification, base_path_prefix):
    (learning_set, learning_answers, testing_set,
     testing_answers) = get_data_for_learning(list_of_paths_to_data[0],
                                              list_of_paths_to_data[1],
                                              list_of_paths_to_data[2])
    mean_squared_errors_test = []
    mean_squared_errors_train = []
    avg_acc_errors_test = []
    avg_acc_errors_train = []
    epoch_points_size = 100
    epoch_separation = epoch // epoch_points_size if epoch >= epoch_points_size else 1
    epoch_measure_points = []
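    # batch_size appears to be a fraction of the dataset, so one epoch
    # spans ceil(1 / batch_size) iterations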
    iters_in_epoch = ceil(1.0 / batch_size)

    def iter_cb(mlp, avg_error, epoch, iter):
        #print("Epoch", epoch + 1)
        if (epoch + 1) % epoch_separation == 0 and iter == iters_in_epoch - 1:
            m.print_iter(mlp, avg_error, epoch + 1, iter)
            (train_errors,
             test_errors) = m.score_perceptron(mlp, learning_set,
                                               learning_answers, testing_set,
                                               testing_answers)
            (mean_squared_error_train, avg_acc_error_train) = train_errors
            (mean_squared_error_test, avg_acc_error_test) = test_errors
            mean_squared_errors_train.append(mean_squared_error_train)
            mean_squared_errors_test.append(mean_squared_error_test)
            avg_acc_errors_train.append(avg_acc_error_train)
            avg_acc_errors_test.append(avg_acc_error_test)
            epoch_measure_points.append(epoch + 1)
            print(f"mean_squared_error_train - {mean_squared_error_train}")
            print(f"mean_squared_error_test - {mean_squared_error_test}")
            print(f"avg_acc_error - {avg_acc_error_train}")
            print(f"avg_acc_error - {avg_acc_error_test}")
        return True

    perceptron = MLP(layers, function, batch_size, epoch, learning_rate,
                     momentum, bias, rng, classification, iter_cb)
    perceptron.fit(learning_set, learning_answers)
    result = perceptron.predict(testing_set)
    base_path = base_path_prefix + "_" + os.path.basename(
        list_of_paths_to_data[0])[:-4]

    perceptron.net.save_to_files(base_path)

    with open(f"{base_path}_{suffix}_results.txt", 'w') as file:
        file.write("mean_squared_errors_test: ")
        file.writelines(f"{error} " for error in mean_squared_errors_test)
        file.write('\n')
        file.write("mean_squared_errors_train: ")
        file.writelines(f"{error} " for error in mean_squared_errors_train)
        file.write('\n')
        file.write("avg_acc_errors_test: ")
        file.writelines(f"{error} " for error in avg_acc_errors_test)
        file.write('\n')
        file.write("avg_acc_errors_train: ")
        file.writelines(f"{error} " for error in avg_acc_errors_train)
        file.write('\n')
        file.write("epoch_measure_points: ")
        file.writelines(f"{error} " for error in epoch_measure_points)

    # save to csv
    res = result.rename(columns={result.columns[0]: "Label"})
    res.insert(loc=0, column='ImageId', value=np.arange(1, len(res) + 1))
    res.to_csv(f"{base_path}_{suffix}_result.csv", index=False)
Example n. 20
def prepare_and_run_perceptron(learning_set_path, testing_set_path):

    if error_in_paths(learning_set_path, testing_set_path):
        return

    print("Loading data...")
    (learning_set, learning_answers, testing_set,
     testing_answers) = get_data_for_learning(learning_set_path,
                                              testing_set_path)

    print("Loading perceptron parameters...")
    config = configparser.ConfigParser()
    config.read('./parameters.ini')

    true_false_converter = {'yes': True, 'no': False}

    classification = true_false_converter.get(
        config['Parameters']['classification'], None)
    if classification is None:
        raise ValueError("Incorrect classification or regression setting")

    activation_function = config['Parameters']['activation function']
    if activation_function is None:
        raise ValueError("Incorrect activation function")

    bias = true_false_converter.get(config['Parameters']['bias'], None)
    if bias is None:
        raise ValueError("Incorrect bias setting")

    layers = config['Parameters']['number of neurons in each layer']
    layers = [int(x.strip()) for x in layers.split(',')]

    epochs = int(config['Parameters']['epochs'])
    batch_size = float(config['Parameters']['batch size'])
    learning_rate = float(config['Parameters']['learning rate'])
    momentum = float(config['Parameters']['momentum'])
    rng_seed = int(config['Parameters']['rng seed'])

    print("Creating perceptron...")

    mean_squared_errors_test = []
    mean_squared_errors_train = []

    avg_acc_errors_test = []
    avg_acc_errors_train = []

    epoch_points_size = 10
    epoch_separation = epochs // epoch_points_size if epochs >= epoch_points_size else 1

    epoch_measure_points = []
    iters_in_epoch = ceil(1.0 / batch_size)

    def iter_cb(mlp, avg_error, epoch, iter):
        if (epoch + 1) % epoch_separation == 0 and iter == iters_in_epoch - 1:
            print_iter(mlp, avg_error, epoch + 1, iter)
            (train_errors,
             test_errors) = score_perceptron(mlp, learning_set,
                                             learning_answers, testing_set,
                                             testing_answers)
            (mean_squared_error_train, avg_acc_error_train) = train_errors
            (mean_squared_error_test, avg_acc_error_test) = test_errors
            mean_squared_errors_train.append(mean_squared_error_train)
            mean_squared_errors_test.append(mean_squared_error_test)
            avg_acc_errors_train.append(avg_acc_error_train)
            avg_acc_errors_test.append(avg_acc_error_test)
            epoch_measure_points.append(epoch + 1)
            print("MSE:", mean_squared_error_test)
        return True

    perceptron = MLP(layers, activation_function, batch_size, epochs,
                     learning_rate, momentum, bias, rng_seed, classification,
                     iter_cb)

    print("Learning in progress...")
    perceptron.fit(learning_set, learning_answers)

    result = perceptron.predict(testing_set)
    print("Completed!")

    # oldnet = perceptron.net
    # perceptron.net.save_to_files("save_test")
    # perceptron.net = nn.NeuralNetwork.load_from_files("save_test")
    # perceptron.net.fit_params.iter_callback = iter_cb
    # perceptron.fit(learning_set, learning_answers)
    if classification:
        if ask_to_see_visualisation("confusion matrix"):
            v.confusion_matrix(testing_answers, result)
        if ask_to_see_visualisation("result of classification"):
            v.visualize_classification(perceptron, testing_set, result)
        if ask_to_see_visualisation("accuracy plot"):
            v.visualize_accuracy(avg_acc_errors_train, avg_acc_errors_test,
                                 epoch_measure_points)
    else:
        if ask_to_see_visualisation("result of regression"):
            v.visualize_regression(learning_set, learning_answers, testing_set,
                                   testing_answers, result)
        if ask_to_see_visualisation("average error values plot"):
            v.visualize_avg_errors(avg_acc_errors_train, avg_acc_errors_test,
                                   epoch_measure_points)

    if ask_to_see_visualisation("mean square errors plot"):
        v.visualize_mean_sqrt_errors(mean_squared_errors_train,
                                     mean_squared_errors_test,
                                     epoch_measure_points)
    if ask_to_see_visualisation("result model with weights"):
        v.show_edges_weight(perceptron)
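
The keys read above imply a parameters.ini of roughly this shape; the section name and key names come from the code, while the values are illustrative only:

[Parameters]
classification = yes
activation function = sigmoid
bias = yes
number of neurons in each layer = 2, 8, 1
epochs = 100
batch size = 0.1
learning rate = 0.01
momentum = 0.9
rng seed = 42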
Example n. 21
#%%
#Split data
train_index = 50000
validation_index = 50000
# Training data
input_data = train_data[:train_index, :]
output_data = label_one_hot[:train_index, ]
#Validation data for testing.
vali_data = train_data[validation_index:, :]
vali_label = label[validation_index:]
#%%
# Using different configuration of model for training
# Benchmark model: batch GD, 4 layers, MSE loss, learning rate 0.01
benchmark_nn = MLP([128, 60, 50, 10], [None, 'relu', 'relu', 'relu'])
benchmark_nn_loss = benchmark_nn.fit(input_data, output_data, learning_rate=0.01, epochs=80, \
                                minibatches_size=0,loss_function='MSE', lb=0, p=1, \
                                batch_norm=False, momentum=False, r=0 )
# Training
train_output = benchmark_nn.predict(input_data)
acc_train = accuracy(label[:train_index], train_output)
#Testing
vali_output = benchmark_nn.predict(vali_data)
acc_test = accuracy(vali_label, vali_output)
print('Training accuracy for benchmark model:', acc_train)
print('Validation accuracy for benchmark model:', acc_test)

#%%
# Model 1: 4 layers, cross-entropy loss, mini-batch training (256).
model1 = MLP([128, 80, 70, 10], [None, 'relu', 'relu', 'softmax'])
model1_loss = model1.fit(input_data, output_data, learning_rate=0.01, epochs=150, \
                                minibatches_size=256,loss_function='cross_entropy', lb=0, p=1, \
Example n. 22
# 2. split data into x and y
x = iris.data
y = iris.target

# 3. train, test, split
X_train, X_test, Y_train, Y_test = train_test_split(x,
                                                    y,
                                                    test_size=.3,
                                                    random_state=0)

# 4. feature Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# 5. Fit and predict
mlp = MLP()
mlp.fit(X_train, Y_train)
for i in X_test:
    print(mlp.predict([i]))
"""
I want to do:
    MLP(targets = targets[], num_inputs = int(num))
    targets are number of classes to identify, 
    num_inputs = number of input nodes on the input layer
    MLP.fit(X_train, Y_train)
    MLP.predict(X_test)
    
    

"""
Example n. 23
import Utils
from MLP import MLP

if __name__ == "__main__":
    [data, labels] = Utils.create_one_out_of_n_dataset()

    num_hidden_nodes_layer_1 = 3
    num_output_layers = 8
    num_iterations = 10000
    learning_rate = 0.01

    mlp_batch = MLP(inputs=data,
                    inputs_labels=data,
                    num_output_layers=num_output_layers,
                    learning_rate=learning_rate,
                    num_nodes_hidden_layer=num_hidden_nodes_layer_1,
                    num_iterations=num_iterations,
                    batch_train=True,
                    verbose=True)

    [_, _, mse_batch] = mlp_batch.fit()

    mse = [mse_batch]

    legend_name = ['batch error']
    # Utils.plot_error(mse, legend_name, num_epochs=num_iterations)
Example n. 24
def run_perceptron(batch_size, bias, epoch, function, layers, learning_rate, momentum, list_of_paths_to_data, rng,
                   suffix, classification):
    (learning_set, learning_answers, testing_set, testing_answers) = m.get_data_for_learning(list_of_paths_to_data[0],
                                                                                             list_of_paths_to_data[1])
    mean_squared_errors_test = []
    mean_squared_errors_train = []
    avg_acc_errors_test = []
    avg_acc_errors_train = []
    epoch_points_size = 100
    epoch_separation = epoch // epoch_points_size if epoch >= epoch_points_size else 1
    epoch_measure_points = []
    iters_in_epoch = ceil(1.0 / batch_size)

    def iter_cb(mlp, avg_error, epoch, iter):
        if (epoch + 1) % epoch_separation == 0 and iter == iters_in_epoch - 1:
            m.print_iter(mlp, avg_error, epoch + 1, iter)
            (train_errors, test_errors) = m.score_perceptron(mlp,
                                                             learning_set,
                                                             learning_answers,
                                                             testing_set,
                                                             testing_answers)
            (mean_squared_error_train, avg_acc_error_train) = train_errors
            (mean_squared_error_test, avg_acc_error_test) = test_errors
            mean_squared_errors_train.append(mean_squared_error_train)
            mean_squared_errors_test.append(mean_squared_error_test)
            avg_acc_errors_train.append(avg_acc_error_train)
            avg_acc_errors_test.append(avg_acc_error_test)
            epoch_measure_points.append(epoch + 1)
            print("MSE:", mean_squared_error_test)
        return True

    perceptron = MLP(layers, function, batch_size, epoch, learning_rate,
                     momentum, bias, rng, classification, iter_cb)
    perceptron.fit(learning_set, learning_answers)
    result = perceptron.predict(testing_set)
    base_path = os.path.basename(list_of_paths_to_data[0])[:-4]
    perceptron.net.save_to_files(f"{base_path}_{suffix}_net")

    if classification:
        v.visualize_classification(perceptron, testing_set, result, True,
                                   f"{base_path}_{suffix}_classification.png")
        v.confusion_matrix(testing_answers, result, True,
                           f"{base_path}_{suffix}_confusion_matrix.png")
        v.visualize_accuracy(avg_acc_errors_train, avg_acc_errors_test, epoch_measure_points, True,
                             f"{base_path}_{suffix}_accuracy.png")
    else:
        v.visualize_regression(learning_set, learning_answers, testing_set, testing_answers,
                               result, True, f"{base_path}_{suffix}_regression.png")
        v.visualize_avg_errors(avg_acc_errors_train, avg_acc_errors_test, epoch_measure_points, True,
                               f"{base_path}_{suffix}_avg_errors.png")

    v.visualize_mean_sqrt_errors(mean_squared_errors_train, mean_squared_errors_test,
                                 epoch_measure_points, True,
                                 f"{base_path}_{suffix}_mean_square_errors.png")
    v.show_edges_weight(perceptron, True, f"{base_path}_{suffix}_weights.png")

    with open(f"{base_path}_{suffix}_results.txt", 'w') as file:
        file.write("mean_squared_errors_test: ")
        file.writelines(f"{error} " for error in mean_squared_errors_test)
        file.write('\n')
        file.write("mean_squared_errors_train: ")
        file.writelines(f"{error} " for error in mean_squared_errors_train)
        file.write('\n')
        file.write("avg_acc_errors_test: ")
        file.writelines(f"{error} " for error in avg_acc_errors_test)
        file.write('\n')
        file.write("avg_acc_errors_train: ")
        file.writelines(f"{error} " for error in avg_acc_errors_train)
Example n. 25
# Mean normalization for the data: training-set statistics applied to both splits
means = x_train.mean(axis=0)
mins, maxs = x_train.min(axis=0), x_train.max(axis=0)
x_train = (x_train - means) / (maxs - mins)
x_test = (x_test - means) / (maxs - mins)

model = MLP()

model.add_layer(units=8, activation='relu', input_units=4)
model.add_layer(units=3, activation='softmax', input_units=8)
model.fit(X=x_train, y=y_train, epochs=500, learning_rate=0.005)

predictions = model.predict(X=x_test)
print(model.eval(predictions, y_test))
print(predictions)

plt.xlabel("Number of Epochs")
plt.ylabel("Cost")
plt.plot(model.epochs, model.cost)
plt.show()
Example n. 26
df = df.drop(
    columns=["ZN", "INDUS", "CHAS", "NOX", "AGE", "DIS", "RAD", "TAX", "B"])
"""splitting data & target"""
M = df.to_numpy()
M = np.array(M)
target = M[:, 4]
data = M[:, :4]
data = whiten(data)

# print(data)
# print(target)

# while True:
# for i in [1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]:
# for i in range(50):
mlp = MLP(hidden_layers=(5, 6, 6), iterations=10000)
mlp.fit(data, target)
# mlp.fit(data, target, False)

# print(f"Result: {mlp.predict(data)}")
# print(f"No. Iterations: {i}")
# print(i)
print(f"Loss: {np.mean(np.square(np.array([target]).T - mlp.predict(data)))}")
print(mlp.predict(data))
# input()

# print(np.array([target]).T - mlp.predict(data))

# print(mlp.weights)
# print(mlp.bias)
Example n. 27
y_train = np.vstack((1 - y_train, y_train))
y_test = np.vstack((1 - y_test, y_test))

logging.basicConfig(
    level=logging.DEBUG,
    format=
    '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%a, %d %b %Y %H:%M:%S')
mlp = MLP(num_hidden_units=50,
          alpha=.01,
          lambda_penalty=0.0001,
          activation="tanh",
          random_seed=1234)
mlp.fit(x_train,
        y_train,
        epochs=50,
        batch_size=10000,
        stall_limit=100,
        pct_validation=0.1)

probs_train = mlp.predict_proba(x_train)
loss = np.mean((probs_train - y_train)**2)
print "Training Loss: ", loss

probs_test = mlp.predict_proba(x_test)
loss = np.mean((probs_test - y_test)**2)
print "Test Loss: ", loss

class_scores = mlp.score_classes(x_train, y_train)
print("Train class scores:", class_scores)
class_scores = mlp.score_classes(x_test, y_test)
print("Test class scores:", class_scores)
Example n. 28

# Read the csv
df = pd.read_csv('semeion.data', sep=' ', lineterminator='\n')
print('File shape:', df.shape)

# Prepare the dataset
digitos = prepara_dataset(df, 10)

# Create the network
mlp = MLP(hidden_units=[30], n_classes=10, learning_rate=0.5, delta_error=1e-2)
print('\nMLP created ({})'.format(mlp))

# Train the network
print('\nTraining:\n')
mlp.fit(digitos, train_size=0.7, verbose=True)

# Score
print('\nAccuracy:', mlp.score())

# Results
plt.figure(figsize=(17, 5))
for i in range(1, 17):
    plt.subplot(2, 8, i)
    rand = np.random.randint(0, len(df))
    real = np.argmax(digitos.Y[rand])
    pred = mlp.predict(digitos.X[rand])
    plt.imshow(digitos.X[rand].reshape(16, 16), cmap='binary')
    plt.xlabel('Actual: {}\nPredicted: {}'.format(real, pred))
    plt.xticks([])
    plt.yticks([])
Example n. 29
from matplotlib import cm
from MLP import MLP
import time

if __name__ == '__main__':
    np.random.seed(0)
    m = 1000
    X = np.random.rand(m, 2) * np.pi
    y = np.cos(X[:, 0] * X[:, 1]) * np.cos(2 * X[:, 0])
    X = (X / np.pi) * 2 - 1  # normalization to [-1.0, 1.0]

    mlp = MLP(K=16, T=1 * 10 ** 6, eta=0.01,
              random_state=1)  # random_state=0 -> unlucky starting point, random_state=1 -> lucky starting point
    print("FITTING MLP...")
    t1 = time.time()
    mlp.fit(X, y)
    t2 = time.time()
    print("FITTING DONE IN " + str(t2 - t1) + " s.")
    y_pred = mlp.predict(X)
    err = 0.5 * (y_pred - y).dot(y_pred - y) / m
    print("MEAN 1/2 ERR^2: " + str(err))

    steps = 20
    #     X1, X2 = np.meshgrid(np.linspace(0.0, np.pi, steps), np.linspace(0.0, np.pi, steps))
    #     X12 = np.array([X1.ravel(), X2.ravel()]).T
    #     y_ref = np.cos(X12[:, 0] * X12[:, 1]) * np.cos(2 * X12[:, 0])

    X1, X2 = np.meshgrid(np.linspace(-1.0, 1.0, steps), np.linspace(-1.0, 1.0, steps))
    X12 = np.array([X1.ravel(), X2.ravel()]).T
    X1_temp = (X1 + 1.0) / 2.0 * np.pi  # denormalization
    X2_temp = (X2 + 1.0) / 2.0 * np.pi  # denormalization
Example n. 30
for _ in range(4):
    i = int(os.environ['N_Client'])
    X_slices, y_slices = create_clients()

    client_model = MLP()
    client_model.build()

    global_weights = []
    for layer in range(len(client_model.get_layers()) * 2):
        r = requests.get(
            'http://server:8000/get_weights/?layer={}'.format(layer))
        global_weights.append(np.array(r.json()[str(layer)]))

    client_model.compile()
    client_model.set_weights(global_weights)
    client_model.fit(X_slices[i], y_slices[i])

    local_weights = client_model.get_weights()

    url = 'http://server:8000/send_weights/'

    for layer in range(len(client_model.get_layers()) * 2):
        myobj = json.dumps({
            "layer": layer,
            "data_qtd": X_slices.shape[0],
            "weights": local_weights[layer].tolist()
        }).encode('utf-8')
        x = requests.post(url, data=myobj)

    time.sleep(randint(10, 15))
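
The client loop above assumes a server exposing /get_weights/ and /send_weights/. A hypothetical counterpart sketched with FastAPI; only the endpoint paths and JSON fields mirror the client code, while the aggregation logic and everything else is an assumption:

# Hypothetical server for the federated client above (FedAvg-style,
# weighted by each client's reported data_qtd).
from fastapi import FastAPI
from pydantic import BaseModel
import numpy as np

app = FastAPI()
global_weights = {}   # layer index -> np.ndarray, seeded from an initial model
pending = {}          # layer index -> list of (data_qtd, weights) updates

class Update(BaseModel):
    layer: int
    data_qtd: int
    weights: list

@app.get("/get_weights/")
def get_weights(layer: int):
    # Serve the current global weights for one layer, keyed by its index
    return {str(layer): global_weights[layer].tolist()}

@app.post("/send_weights/")
def send_weights(update: Update):
    # Collect one client's update for later aggregation
    pending.setdefault(update.layer, []).append(
        (update.data_qtd, np.array(update.weights)))
    return {"status": "ok"}

def federated_average(layer):
    # Weighted mean of the pending client updates for one layer
    total = sum(n for n, _ in pending[layer])
    global_weights[layer] = sum(n * w for n, w in pending[layer]) / total
    pending[layer].clear()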