def test_evaluation_metrics_MLP(self):
    mlp = MultilayerPerceptron(16, [16, 8, 4, 2], 2)
    test_predictions = np.array([[0.9, 0.1], [0.01, 0.99]])
    labels = np.array([[1, 0], [0, 1]])
    test_acc = mlp.compute_predictions_accuracy(test_predictions, labels)
    confusion_matrix = mlp.compute_confussion_matrix(test_predictions, labels)
    self.assertEqual(test_acc, 100)
    np.testing.assert_array_equal(confusion_matrix,
                                  np.array([[1, 0], [0, 1]]))
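For context, here is a minimal sketch of what the two methods exercised by this test might compute. The method names come from the snippet above (including the `compute_confussion_matrix` spelling), but the bodies are assumptions, not the class's actual implementation:

import numpy as np

def compute_predictions_accuracy(predictions, labels):
    # Percentage of rows whose argmax matches the one-hot label.
    hits = np.argmax(predictions, axis=1) == np.argmax(labels, axis=1)
    return 100.0 * np.mean(hits)

def compute_confussion_matrix(predictions, labels):
    # Assumed convention: rows are true classes, columns are predicted classes.
    n_classes = labels.shape[1]
    matrix = np.zeros((n_classes, n_classes), dtype=int)
    for pred_row, label_row in zip(predictions, labels):
        matrix[np.argmax(label_row), np.argmax(pred_row)] += 1
    return matrix

On the test inputs above these return 100 and the identity matrix, matching both assertions.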
Example no. 2
def perform_one_experiment(X_train, Y_train, X_test, Y_test, config):
    """Performs one experiment with a given data set and generates results."""
    # Prepare data
    processor = Processor(**config['processor_params'])
    X_train = processor.fit_transform(X_train)
    X_test = processor.transform(X_test)
    imputer = Imputer(**config['imputer_params'])
    X_train = imputer.fit_transform(X_train)
    X_test = imputer.transform(X_test)

    # Create the algorithm object
    algorithm_name = config['experiment']['algorithm']
    if algorithm_name == 'random_guess':
        algorithm = RandomGuessAlgorithm(**config['algo_params'])
    elif algorithm_name == 'rf':
        algorithm = RandomForestAlgorithm(**config['algo_params'])
    elif algorithm_name == 'multilayer_perceptron':
        algorithm = MultilayerPerceptron(n_input=X_train.shape[1], **config['algo_params'])
    elif algorithm_name == 'gradient_boosting':
        algorithm = GradientBoostingAlgorithm(**config['algo_params'])
    else:
        raise NotImplementedError('Algorithm {} is not an available option'.format(algorithm_name))

    # Perform experiment
    results = dict()
    results['fit_info'] = algorithm.fit(X_train, Y_train)
    pred_proba = algorithm.predict_proba(X_test)
    pred = np.argmax(pred_proba, axis=1)

    # Calculate and save results
    results['log_loss'] = metrics.log_loss(Y_test, pred_proba[:, 1])
    results['accuracy'] = metrics.accuracy_score(Y_test, pred)
    results['recall'] = metrics.recall_score(Y_test, pred, labels=[0, 1])
    results['precision'] = metrics.precision_score(Y_test, pred, labels=[0, 1])
    fpr, tpr, thresholds = metrics.roc_curve(Y_test, pred_proba[:, 1])
    results['roc_curve'] = {'fpr': fpr, 'tpr': tpr, 'thresholds': thresholds}
    results['roc_auc'] = metrics.auc(fpr, tpr)
    results['classification_report'] = metrics.classification_report(Y_test, pred, labels=[0, 1])

    return results
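A hypothetical `config` for calling `perform_one_experiment`; the exact keys under `processor_params`, `imputer_params`, and `algo_params` depend on the project's own `Processor`, `Imputer`, and algorithm classes, so treat these values as placeholders:

config = {
    'processor_params': {},                 # forwarded to Processor
    'imputer_params': {},                   # forwarded to Imputer
    'experiment': {'algorithm': 'rf'},      # one of the four names handled above
    'algo_params': {'n_estimators': 100},   # forwarded to the chosen algorithm
}
results = perform_one_experiment(X_train, Y_train, X_test, Y_test, config)
print(results['accuracy'], results['roc_auc'])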
Example no. 3
# ..........................
#  TRAIN / TEST SPLIT
# ..........................
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# Rescale labels to {-1, 1} for Adaboost
rescaled_y_train = 2 * y_train - np.ones(np.shape(y_train))
rescaled_y_test = 2 * y_test - np.ones(np.shape(y_test))

# .......
#  SETUP
# .......
adaboost = Adaboost(n_clf=8)
naive_bayes = NaiveBayes()
knn = KNN(k=4)
logistic_regression = LogisticRegression()
mlp = MultilayerPerceptron(n_hidden=20)
perceptron = Perceptron()
decision_tree = DecisionTree()
random_forest = RandomForest(n_estimators=150)
support_vector_machine = SupportVectorMachine(C=1, kernel=rbf_kernel)

# ........
#  TRAIN
# ........
print "Training:"
print "\tAdaboost"
adaboost.fit(X_train, rescaled_y_train)
print "\tNaive Bayes"
naive_bayes.fit(X_train, y_train)
print "\tLogistic Regression"
logistic_regression.fit(X_train, y_train)
Example no. 4
from pixel_parser import Parser
from numpy import exp, array, random, dot
from multilayer_perceptron import MultilayerPerceptron, NeuronLayer

parser = Parser()

# Seed the random number generator so the run is reproducible
random.seed(1)

# Create layer 1 (5 neurons, each with 35 inputs)
hidden_layer_1 = NeuronLayer(5, 35)

# Create layer 2 (a single neuron with 5 inputs)
layer2 = NeuronLayer(1, 5)

# Combine the layers to create a neural network
neural_network = MultilayerPerceptron([hidden_layer_1], layer2)

# print("Stage 1) Random starting synaptic weights: ")a

# The training set. We have 10 examples, each consisting of 35 input values
# (pixels) and 1 output value.
training_set_inputs = array(parser.get_pixels())
training_set_outputs = array([[0], [0], [1], [1], [1], [0], [1], [0], [0],
                              [0]])

# Train the neural network using the training set.
# Do it 100,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 100000)

# print("Stage 2) New synaptic weights after training: ")
# neural_network.print_weights()
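`NeuronLayer` and `MultilayerPerceptron` come from the local `multilayer_perceptron` module, which is not shown. A minimal `NeuronLayer` consistent with how it is constructed here (first argument: number of neurons, second: inputs per neuron) might look like the sketch below; this is an assumption, not the module's actual code:

from numpy import random

class NeuronLayer:
    def __init__(self, number_of_neurons, number_of_inputs_per_neuron):
        # One weight column per neuron, one row per input, drawn from [-1, 1).
        self.synaptic_weights = 2 * random.random(
            (number_of_inputs_per_neuron, number_of_neurons)) - 1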
Example no. 5
# ..........................
#  TRAIN / TEST SPLIT
# ..........................
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# Rescale labels to {-1, 1} for Adaboost
rescaled_y_train = 2 * y_train - np.ones(np.shape(y_train))
rescaled_y_test = 2 * y_test - np.ones(np.shape(y_test))

# .......
#  SETUP
# .......
adaboost = Adaboost(n_clf=8)
naive_bayes = NaiveBayes()
knn = KNN(k=4)
logistic_regression = LogisticRegression()
mlp = MultilayerPerceptron(n_hidden=20, n_iterations=20000, learning_rate=0.1)
perceptron = Perceptron()
decision_tree = ClassificationTree()
random_forest = RandomForest(n_estimators=50)
support_vector_machine = SupportVectorMachine()
lda = LDA()
gbc = GradientBoostingClassifier(n_estimators=50,
                                 learning_rate=.9,
                                 max_depth=2)
xgboost = XGBoost(n_estimators=50, learning_rate=0.5, max_depth=2)

# ........
#  TRAIN
# ........
print("Training:")
print("\tAdaboost")
Example no. 6
if __name__ == "__main__":
    print("\nLoading data... ", end="")
    data, labels = load_mnist("./data/mnist_data.csv")
    print("done!")

    i = int(len(data) * TEST_SET_PC)
    X_train, Y_train = data[i:], labels[i:]
    X_test, Y_test = data[:i], labels[:i]

    print("\nTraining set samples: %d (%d%%)" % (len(X_train), 100 *
                                                 (1 - TEST_SET_PC)))
    print("Test set samples: %d (%d%%)" % (len(X_test), 100 * TEST_SET_PC))

    mlp = MultilayerPerceptron(input_size=784,
                               layers_size=HIDDEN_LAYERS + [10],
                               layers_activation="sigmoid")
    print("\nInitial accuracy (training set): %.2f%%" %
          (100 * accuracy(mlp.predict(X_train), Y_train)))
    print("Initial accuracy (test set): %.2f%%" %
          (100 * accuracy(mlp.predict(X_test), Y_test)))

    print("\nStarting training session...")
    mlp.fit(
        data=X_train,
        labels=Y_train,
        cost_function=MeanSquaredError(),
        epochs=TRAINING_EPOCHS,
        learning_rate=LEARNING_RATE,
        batch_size=32,
        gradient_checking=False,
    )
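`TEST_SET_PC`, `HIDDEN_LAYERS`, `TRAINING_EPOCHS`, `LEARNING_RATE`, and the `accuracy` helper are defined elsewhere in this script and are not shown. Plausible stand-in definitions, labeled as assumptions:

import numpy as np

TEST_SET_PC = 0.2         # assumed: fraction of samples held out for testing
HIDDEN_LAYERS = [64, 32]  # assumed: hidden layer sizes before the 10-unit output
TRAINING_EPOCHS = 30      # assumed
LEARNING_RATE = 0.5       # assumed

def accuracy(predictions, labels):
    # Fraction of samples whose predicted class matches the one-hot label.
    return np.mean(np.argmax(predictions, axis=1) == np.argmax(labels, axis=1))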
Example no. 7
num_training_examples = 5000

x_train = train_data[:num_training_examples, 1:]
y_train = train_data[:num_training_examples, [0]]

x_test = test_data[:, 1:]
y_test = test_data[:, [0]]

layers = [784, 25, 10]

normalize_data = True
max_iterations = 500
alpha = 0.1

multilayer_perceptron = MultilayerPerceptron(x_train, y_train, layers,
                                             normalize_data)
(thetas, costs) = multilayer_perceptron.train(max_iterations, alpha)
plt.plot(range(len(costs)), costs)
plt.xlabel('Gradient steps')
plt.ylabel('Cost')
plt.show()

y_train_predictions = multilayer_perceptron.predict(x_train)
y_test_predictions = multilayer_perceptron.predict(x_test)

train_p = np.sum(y_train_predictions == y_train) / y_train.shape[0] * 100
test_p = np.sum(y_test_predictions == y_test) / y_test.shape[0] * 100
print('Training set accuracy:', train_p)
print('Test set accuracy:', test_p)

numbers_to_display = 64
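The snippet truncates right after `numbers_to_display = 64`; presumably a grid of sample digits follows. A hedged sketch of one way to render such a grid (the square layout and the reshape to 28×28 pixels are assumptions):

num_cells = int(np.ceil(np.sqrt(numbers_to_display)))
plt.figure(figsize=(10, 10))
for plot_index in range(numbers_to_display):
    plt.subplot(num_cells, num_cells, plot_index + 1)
    plt.imshow(x_test[plot_index].reshape(28, 28), cmap='Greys')
    plt.title(int(y_test[plot_index, 0]))
    plt.axis('off')
plt.show()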
Example no. 8
f_activation = 'sigmoid'

# Write the results of each run to an output file

f = open('./results/multilayer_perceptron/' + f_activation + '/multilayer_perceptron_'
         + repr(num_dense) + 'n_' + repr(max_num_epocs) + 'e_f_' + f_activation + '.txt', 'w')

for i in range(1,max_num_epocs+1):
    
    print("-"*50)
    print("Construye red neuronal con ", i, "épocas de entrenamiento")
    print("-"*50)
    
    #network = ConvolutionalNetwork('./files', i, 'ConvolutionalNetwork')
    
    network = MultilayerPerceptron('./files', i, 'MultilayerPerceptron')
    
    network.layers_construction(training_sample=True)
    
    network.model.summary()
    
    start_time = time()
    
    network.learn_training_sample()
    
    elapsed_time = time() - start_time

    loss,acc = network.evaluate_training_sample()
    
    f.write(repr(i) + '\t' + repr(loss) + '\t' + repr(acc) + '\t' + repr(elapsed_time) + '\n')
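One design note on the loop above: `f` is opened at the top but never explicitly closed, so the last lines may not be flushed if a run crashes. A `with` block gives the same tab-separated output with guaranteed cleanup; a minimal sketch (the path and placeholder metrics are illustrative):

from time import time

results_path = './results/example_run.txt'  # hypothetical path

with open(results_path, 'w') as f:
    for i in range(1, 4):
        start_time = time()
        loss, acc = 0.0, 0.0                # placeholders for train/evaluate
        elapsed_time = time() - start_time
        f.write('%d\t%r\t%r\t%r\n' % (i, loss, acc, elapsed_time))
# The file is closed and flushed automatically when the block exits.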
    
Example no. 9
from tensorflow.examples.tutorials.mnist import input_data

from multilayer_perceptron import MultilayerPerceptron

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
mlp = MultilayerPerceptron()
mlp.fit(mnist.train.images, mnist.train.labels)
Example no. 10
from tensorflow.examples.tutorials.mnist import input_data

from multilayer_perceptron import MultilayerPerceptron

mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
n_inputs = len(mnist.train.images)      # number of training examples
n_classes = len(mnist.train.labels[0])  # width of the one-hot labels (10 digits)
batch_size = n_inputs // 10

print('Num Inputs: {}'.format(n_inputs))
print('Num classes: {}'.format(n_classes))
print('Batch size: {}'.format(batch_size))

mlp = MultilayerPerceptron(n_inputs, n_classes, batch_size)
mlp.add_layer(50)
mlp.add_layer(20)
mlp.add_layer(10)
mlp.fit(mnist.train.images, mnist.train.labels)
pred = mlp.predict(mnist.test.images)
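The snippet stops at the prediction step. Because the labels were loaded with `one_hot=True`, test accuracy can be recovered by comparing argmaxes; a short follow-up, assuming `pred` holds per-class scores (if `predict` already returns class indices, drop the first `argmax`):

import numpy as np

predicted_classes = np.argmax(pred, axis=1)
true_classes = np.argmax(mnist.test.labels, axis=1)
print('Test accuracy: {:.4f}'.format(np.mean(predicted_classes == true_classes)))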