Example #1
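The examples on this page exercise to_categorical from the ML-From-Scratch project, and the listings omit their imports. A plausible preamble for Example #1 is sketched below; the mlfromscratch module paths are assumptions based on that project's layout, not part of the original listing. Later examples additionally pull in NeuralNetwork, the layer classes, and the Adam optimizer from the same package.

import numpy as np
from sklearn import datasets

# Project-local helpers (paths assumed from the ML-From-Scratch layout):
from mlfromscratch.utils import train_test_split, normalize, accuracy_score, to_categorical, Plot
from mlfromscratch.supervised_learning import Perceptron
from mlfromscratch.deep_learning.activation_functions import Sigmoid
from mlfromscratch.deep_learning.loss_functions import CrossEntropy
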
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    # One-hot encoding of nominal y-values
    y = to_categorical(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, seed=1)

    # Perceptron
    clf = Perceptron(n_iterations=5000,
                     learning_rate=0.001,
                     loss=CrossEntropy,
                     activation_function=Sigmoid)
    clf.fit(X_train, y_train)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)

    print ("Accuracy:", accuracy)

    # Reduce dimension to two using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Perceptron", accuracy=accuracy, legend_labels=np.unique(y))
Example #2
def main():

    optimizer = Adam()

    #-----
    # MLP
    #-----

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    n_samples, n_features = X.shape
    n_hidden = 512

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(Dense(n_hidden, input_shape=(n_features,)))
    clf.add(Activation('leaky_relu'))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(n_hidden))
    clf.add(Activation('leaky_relu'))
    clf.add(Dropout(0.25))
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="MLP")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Epochs')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print ("Accuracy:", accuracy)

    # Reduce dimension to 2D using PCA and plot the results
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    Plot().plot_in_2d(X_test, y_pred, title="Multilayer Perceptron", accuracy=accuracy, legend_labels=range(10))
Example #3
def main():

    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target
    y = to_categorical(y.astype("int"))

    # Model builder
    def model_builder(n_inputs, n_outputs):
        model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
        model.add(Dense(16, input_shape=(n_inputs, )))
        model.add(Activation('relu'))
        model.add(Dense(n_outputs))
        model.add(Activation('softmax'))

        return model

    # Print the model summary of an individual in the population
    print("")
    model_builder(n_inputs=X.shape[1], n_outputs=y.shape[1]).summary()

    population_size = 100
    n_generations = 3000
    mutation_rate = 0.01

    print("Population Size: %d" % population_size)
    print("Generations: %d" % n_generations)
    print("Mutation Rate: %.2f" % mutation_rate)
    print("")

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.4,
                                                        seed=1)

    model = Neuroevolution(population_size=population_size,
                           mutation_rate=mutation_rate,
                           model_builder=model_builder)

    model = model.evolve(X_train, y_train, n_generations=n_generations)

    loss, accuracy = model.test_on_batch(X_test, y_test)

    # Reduce dimension to 2D using PCA and plot the results
    y_pred = np.argmax(model.predict(X_test), axis=1)
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Evolutionary Evolved Neural Network",
                      accuracy=accuracy,
                      legend_labels=range(y.shape[1]))
Example #4
def main():

    data = datasets.load_iris()
    X = normalize(data.data)
    y = data.target
    y = to_categorical(y.astype("int"))

    # Model builder
    def model_builder(n_inputs, n_outputs):
        model = NeuralNetwork(optimizer=Adam(), loss=CrossEntropy)
        model.add(Dense(16, input_shape=(n_inputs,)))
        model.add(Activation('relu'))
        model.add(Dense(n_outputs))
        model.add(Activation('softmax'))

        return model

    # Print the model summary of an individual in the population
    print ("")
    model_builder(n_inputs=X.shape[1], n_outputs=y.shape[1]).summary()

    population_size = 100
    n_generations = 10

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    inertia_weight = 0.8
    cognitive_weight = 0.8
    social_weight = 0.8

    print ("Population Size: %d" % population_size)
    print ("Generations: %d" % n_generations)
    print ("")
    print ("Inertia Weight: %.2f" % inertia_weight)
    print ("Cognitive Weight: %.2f" % cognitive_weight)
    print ("Social Weight: %.2f" % social_weight)
    print ("")

    model = ParticleSwarmOptimizedNN(population_size=population_size,
                                     inertia_weight=inertia_weight,
                                     cognitive_weight=cognitive_weight,
                                     social_weight=social_weight,
                                     max_velocity=5,
                                     model_builder=model_builder)

    model = model.evolve(X_train, y_train, n_generations=n_generations)

    loss, accuracy = model.test_on_batch(X_test, y_test)

    print ("Accuracy: %.1f%%" % float(100*accuracy))

    # Reduce dimension to 2D using PCA and plot the results
    y_pred = np.argmax(model.predict(X_test), axis=1)
    Plot().plot_in_2d(X_test, y_pred, title="Particle Swarm Optimized Neural Network", accuracy=accuracy, legend_labels=range(y.shape[1]))
Example #5
def gen_num_seq(nums):
    """ Method which generates sequences of numbers """
    X = np.zeros([nums, 10, 20], dtype=float)
    y = np.zeros([nums, 10, 20], dtype=float)
    for i in range(nums):
        start = np.random.randint(0, 10)
        num_seq = np.arange(start, start + 10)
        X[i] = to_categorical(num_seq, n_col=20)
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y
Example #6
def gen_mult_ser(nums):
    """ Method which generates multiplication series """
    X = np.zeros([nums, 10, 61], dtype=float)
    y = np.zeros([nums, 10, 61], dtype=float)
    for i in range(nums):
        start = np.random.randint(2, 7)
        mult_ser = np.linspace(start, start * 10, num=10, dtype=int)
        X[i] = to_categorical(mult_ser, n_col=61)
        y[i] = np.roll(X[i], -1, axis=0)
    y[:, -1, 1] = 1  # Mark endpoint as 1
    return X, y
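Both generators return one-hot tensors of shape (nums, 10, n_col). A hypothetical round-trip check (not part of the original listings) decodes a generated series back to integers with np.argmax:

# Decode one generated multiplication series back to integer values
X, y = gen_mult_ser(1)
print(np.argmax(X[0], axis=1))  # the series itself, e.g. [ 4  8 12 ... 40]
print(np.argmax(y[0], axis=1))  # the series shifted one step left; the last
                                # row decodes to 1, the endpoint marker set above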
Example #7
    def fit(self, X, y):
        # One-hot encode the labels so each class gets its own output column
        y = to_categorical(y)

        y_pred = np.zeros(np.shape(y))
        for i in self.bar(range(self.n_estimators)):
            tree = self.trees[i]
            # Hand the tree both the targets and the current predictions so it
            # can fit the gradient of the loss w.r.t. y_pred
            y_and_pred = np.concatenate((y, y_pred), axis=1)
            tree.fit(X, y_and_pred)
            update_pred = tree.predict(X)

            # Gradient step: move the predictions against the estimated gradient
            y_pred -= np.multiply(self.learning_rate, update_pred)
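At prediction time, the accumulated raw scores must be turned back into class labels. The following is a minimal sketch of that step, assuming tree.predict returns the same per-class updates used during fitting; it is not quoted from this page.

    def predict(self, X):
        # Sum the scaled contribution of every fitted tree (mirrors fit above)
        y_pred = None
        for tree in self.trees:
            update = np.multiply(self.learning_rate, tree.predict(X))
            y_pred = -update if y_pred is None else y_pred - update
        # Softmax over the accumulated scores, then pick the most likely class
        probs = np.exp(y_pred) / np.sum(np.exp(y_pred), axis=1, keepdims=True)
        return np.argmax(probs, axis=1)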
Example #8
def main():
    data = datasets.load_digits()
    X = normalize(data.data)
    y = data.target

    # Convert the nominal y values to binary
    y = to_categorical(y)

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, seed=1)

    # MLP
    clf = MultilayerPerceptron(n_hidden=16,
                               n_iterations=1000,
                               learning_rate=0.01)

    clf.fit(X_train, y_train)
    y_pred = np.argmax(clf.predict(X_test), axis=1)
    y_test = np.argmax(y_test, axis=1)

    accuracy = accuracy_score(y_test, y_pred)
    print ("Accuracy:", accuracy)

    # Reduce dimension to two using PCA and plot the results
    Plot().plot_in_2d(X_test, y_pred, title="Multilayer Perceptron", accuracy=accuracy, legend_labels=np.unique(y))
Example #9
    def fit(self, X, y):
        # One-hot encode the labels before delegating to the base class
        y = to_categorical(y)
        super(GradientBoostingClassifier, self).fit(X, y)
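A minimal usage sketch for this override (surrounding class and data assumed, not shown on this page):

clf = GradientBoostingClassifier()
clf.fit(X_train, y_train)     # integer labels; one-hot encoded internally
y_pred = clf.predict(X_test)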
Example #10
def main():

    #----------
    # Conv Net
    #----------

    optimizer = Adam()

    data = datasets.load_digits()
    X = data.data
    y = data.target

    # Convert to one-hot encoding
    y = to_categorical(y.astype("int"))

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.4,
                                                        seed=1)

    # Reshape X to (n_samples, channels, height, width)
    X_train = X_train.reshape((-1, 1, 8, 8))
    X_test = X_test.reshape((-1, 1, 8, 8))

    clf = NeuralNetwork(optimizer=optimizer,
                        loss=CrossEntropy,
                        validation_data=(X_test, y_test))

    clf.add(
        Conv2D(n_filters=16,
               filter_shape=(3, 3),
               stride=1,
               input_shape=(1, 8, 8),
               padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1,
                   padding='same'))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.25))
    clf.add(BatchNormalization())
    clf.add(Flatten())
    clf.add(Dense(256))
    clf.add(Activation('relu'))
    clf.add(Dropout(0.4))
    clf.add(BatchNormalization())
    clf.add(Dense(10))
    clf.add(Activation('softmax'))

    print()
    clf.summary(name="ConvNet")

    train_err, val_err = clf.fit(X_train, y_train, n_epochs=50, batch_size=256)

    # Training and validation error plot
    n = len(train_err)
    training, = plt.plot(range(n), train_err, label="Training Error")
    validation, = plt.plot(range(n), val_err, label="Validation Error")
    plt.legend(handles=[training, validation])
    plt.title("Error Plot")
    plt.ylabel('Error')
    plt.xlabel('Epochs')
    plt.show()

    _, accuracy = clf.test_on_batch(X_test, y_test)
    print("Accuracy:", accuracy)

    y_pred = np.argmax(clf.predict(X_test), axis=1)
    X_test = X_test.reshape(-1, 8 * 8)
    # Reduce dimension to 2D using PCA and plot the results
    Plot().plot_in_2d(X_test,
                      y_pred,
                      title="Convolutional Neural Network",
                      accuracy=accuracy,
                      legend_labels=range(10))
Example #11
# ........
#  TRAIN
# ........
print("Training:")
print("- Adaboost")
adaboost.fit(X_train, rescaled_y_train)
print("- Decision Tree")
decision_tree.fit(X_train, y_train)
print("- Gradient Boosting")
gbc.fit(X_train, y_train)
print("- LDA")
lda.fit(X_train, y_train)
print("- Logistic Regression")
logistic_regression.fit(X_train, y_train)
print("- Multilayer Perceptron")
mlp.fit(X_train, to_categorical(y_train), n_epochs=300, batch_size=50)
print("- Naive Bayes")
naive_bayes.fit(X_train, y_train)
print("- Perceptron")
perceptron.fit(X_train, to_categorical(y_train))
print("- Random Forest")
random_forest.fit(X_train, y_train)
print("- Support Vector Machine")
support_vector_machine.fit(X_train, rescaled_y_train)
print("- XGBoost")
xgboost.fit(X_train, y_train)

# .........
#  PREDICT
# .........
y_pred = {}
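The listing breaks off after initializing y_pred. Following the pattern of the training block above, it presumably continues by collecting each model's test predictions, along these lines (a sketch, not quoted from the page):

y_pred["Adaboost"] = adaboost.predict(X_test)
y_pred["Gradient Boosting"] = gbc.predict(X_test)
y_pred["Naive Bayes"] = naive_bayes.predict(X_test)
# ...and likewise for the remaining models trained above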